* Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
* are met:
*
- * * Redistributions of source code must retain the above copyright
+ * * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
*/
#include <sys/types.h>
return (0);
}
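+/*
+ * Validate the requested multi-queue (RSS/DCB/VMDq) configuration and the
+ * RX/TX queue counts against the device's SR-IOV state. Called from
+ * rte_eth_dev_configure(); returns 0 on success or -EINVAL when the
+ * combination is unsupported.
+ */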
+static int
+rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
+ const struct rte_eth_conf *dev_conf)
+{
+ struct rte_eth_dev *dev = &rte_eth_devices[port_id];
+
+ if (RTE_ETH_DEV_SRIOV(dev).active != 0) {
+ /* check multi-queue mode */
+ if ((dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ||
+ (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) ||
+ (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
+ (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
+ /* SRIOV only works in VMDq enabled mode */
+ PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
+ "wrong VMDQ mq_mode rx %d tx %d\n",
+ port_id, dev_conf->rxmode.mq_mode,
+ dev_conf->txmode.mq_mode);
+ return (-EINVAL);
+ }
+
+ switch (dev_conf->rxmode.mq_mode) {
+ case ETH_MQ_RX_VMDQ_RSS:
+ case ETH_MQ_RX_VMDQ_DCB:
+ case ETH_MQ_RX_VMDQ_DCB_RSS:
+ /* DCB/RSS VMDq in SR-IOV mode is not implemented yet */
+ PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
+ "unsupported VMDQ mq_mode rx %d\n",
+ port_id, dev_conf->rxmode.mq_mode);
+ return (-EINVAL);
+ default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
+ /* if no mq mode was configured, use the default scheme */
+ dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ }
+
+ switch (dev_conf->txmode.mq_mode) {
+ case ETH_MQ_TX_VMDQ_DCB:
+ /* DCB VMDq in SR-IOV mode is not implemented yet */
+ PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
+ "unsupported VMDQ mq_mode tx %d\n",
+ port_id, dev_conf->txmode.mq_mode);
+ return (-EINVAL);
+ default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
+ /* if no mq mode was configured, use the default scheme */
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
+ if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
+ break;
+ }
+
+ /* check valid queue number */
+ if ((nb_rx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool) ||
+ (nb_tx_q > RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
+ "queue number must less equal to %d\n",
+ port_id, RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool);
+ return (-EINVAL);
+ }
+ } else {
+ /* For VMDq+DCB mode, check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_conf *conf;
+
+ if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
+ "!= %d\n",
+ port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
+ if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+ "nb_queue_pools must be %d or %d\n",
+ port_id, ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ }
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+ const struct rte_eth_vmdq_dcb_tx_conf *conf;
+
+ if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
+ "!= %d\n",
+ port_id, ETH_VMDQ_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
+ if (! (conf->nb_queue_pools == ETH_16_POOLS ||
+ conf->nb_queue_pools == ETH_32_POOLS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
+ "nb_queue_pools != %d or nb_queue_pools "
+ "!= %d\n",
+ port_id, ETH_16_POOLS, ETH_32_POOLS);
+ return (-EINVAL);
+ }
+ }
+
+ /* For DCB mode check our configuration before we go further */
+ if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
+ const struct rte_eth_dcb_rx_conf *conf;
+
+ if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
+ "!= %d\n",
+ port_id, ETH_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
+ if (! (conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+ "nb_tcs != %d or nb_tcs "
+ "!= %d\n",
+ port_id, ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+
+ if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+ const struct rte_eth_dcb_tx_conf *conf;
+
+ if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
+ "!= %d\n",
+ port_id, ETH_DCB_NUM_QUEUES);
+ return (-EINVAL);
+ }
+ conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
+ if (! (conf->nb_tcs == ETH_4_TCS ||
+ conf->nb_tcs == ETH_8_TCS)) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
+ "nb_tcs != %d or nb_tcs "
+ "!= %d\n",
+ port_id, ETH_4_TCS, ETH_8_TCS);
+ return (-EINVAL);
+ }
+ }
+ }
+ return 0;
+}
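+
+/*
+ * Worked example (hypothetical, for illustration only): with SR-IOV
+ * active, a configuration of mq_mode = ETH_MQ_RX_NONE and nb_rx_q = 1
+ * passes the checks above: the RX mode is promoted to
+ * ETH_MQ_RX_VMDQ_ONLY, nb_q_per_pool is capped at 1, and the queue
+ * count check then requires nb_rx_q <= 1.
+ */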
+
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
/* Use default value */
dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
- /* For vmdb+dcb mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_VMDQ_DCB) {
- const struct rte_eth_vmdq_dcb_conf *conf;
-
- if (nb_rx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_rx_q "
- "!= %d\n",
- port_id, ETH_VMDQ_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->rx_adv_conf.vmdq_dcb_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
- "nb_queue_pools must be %d or %d\n",
- port_id, ETH_16_POOLS, ETH_32_POOLS);
- return (-EINVAL);
- }
- }
- if (dev_conf->txmode.mq_mode == ETH_VMDQ_DCB_TX) {
- const struct rte_eth_vmdq_dcb_tx_conf *conf;
-
- if (nb_tx_q != ETH_VMDQ_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB, nb_tx_q "
- "!= %d\n",
- port_id, ETH_VMDQ_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->tx_adv_conf.vmdq_dcb_tx_conf);
- if (! (conf->nb_queue_pools == ETH_16_POOLS ||
- conf->nb_queue_pools == ETH_32_POOLS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d VMDQ+DCB selected, "
- "nb_queue_pools != %d or nb_queue_pools "
- "!= %d\n",
- port_id, ETH_16_POOLS, ETH_32_POOLS);
- return (-EINVAL);
- }
- }
-
- /* For DCB mode check our configuration before we go further */
- if (dev_conf->rxmode.mq_mode == ETH_DCB_RX) {
- const struct rte_eth_dcb_rx_conf *conf;
-
- if (nb_rx_q != ETH_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_rx_q "
- "!= %d\n",
- port_id, ETH_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->rx_adv_conf.dcb_rx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
- "nb_tcs != %d or nb_tcs "
- "!= %d\n",
- port_id, ETH_4_TCS, ETH_8_TCS);
- return (-EINVAL);
- }
- }
-
- if (dev_conf->txmode.mq_mode == ETH_DCB_TX) {
- const struct rte_eth_dcb_tx_conf *conf;
-
- if (nb_tx_q != ETH_DCB_NUM_QUEUES) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB, nb_tx_q "
- "!= %d\n",
- port_id, ETH_DCB_NUM_QUEUES);
- return (-EINVAL);
- }
- conf = &(dev_conf->tx_adv_conf.dcb_tx_conf);
- if (! (conf->nb_tcs == ETH_4_TCS ||
- conf->nb_tcs == ETH_8_TCS)) {
- PMD_DEBUG_TRACE("ethdev port_id=%d DCB selected, "
- "nb_tcs != %d or nb_tcs "
- "!= %d\n",
- port_id, ETH_4_TCS, ETH_8_TCS);
- return (-EINVAL);
- }
+ /* multi-queue mode checking */
+ diag = rte_eth_dev_check_mq_mode(port_id, nb_rx_q, nb_tx_q, dev_conf);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_check_mq_mode = %d\n",
+ port_id, diag);
+ return diag;
}
/*
struct rte_eth_dev_info dev_info;
struct ether_addr addr;
uint16_t i;
+ uint32_t pool = 0;
dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
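+ /* when SR-IOV is active, replay MAC addresses into the default VMDq pool */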
+ if (RTE_ETH_DEV_SRIOV(dev).active)
+ pool = RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx;
+
/* replay MAC address configuration */
for (i = 0; i < dev_info.max_mac_addrs; i++) {
addr = dev->data->mac_addrs[i];
/* add address to the hardware */
if (*dev->dev_ops->mac_addr_add)
- (*dev->dev_ops->mac_addr_add)(dev, &addr, i, 0);
+ (*dev->dev_ops->mac_addr_add)(dev, &addr, i, pool);
else {
PMD_DEBUG_TRACE("port %d: MAC address array not supported\n",
port_id);
return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
tx_pkts, nb_pkts);
}
+
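+/*
+ * Return the number of used descriptors on the given RX queue, as
+ * reported by the driver's rx_queue_count callback. Returns 0 when the
+ * port id is invalid.
+ */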
+uint32_t
+rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return 0;
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, -ENOTSUP);
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+}
+
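+/*
+ * Check, via the driver's rx_descriptor_done callback, whether the RX
+ * descriptor at the given offset in the queue has been filled by the
+ * hardware. Returns -ENODEV for an invalid port and -ENOTSUP when the
+ * driver does not implement the callback.
+ */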
+int
+rte_eth_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_descriptor_done, -ENOTSUP);
+ return (*dev->dev_ops->rx_descriptor_done)( \
+ dev->data->rx_queues[queue_id], offset);
+}
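+
+/*
+ * Usage sketch (hypothetical application code, not part of this patch):
+ * an application could check the queue backlog and descriptor status
+ * before draining the queue with rte_eth_rx_burst(), e.g.
+ *
+ * uint32_t backlog = rte_eth_rx_queue_count(port_id, queue_id);
+ *
+ * if (backlog > 0 &&
+ *     rte_eth_rx_descriptor_done(port_id, queue_id, 0) == 1)
+ *     nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, MAX_PKTS);
+ *
+ * port_id, queue_id, pkts, nb_rx and MAX_PKTS are assumed to be defined
+ * by the caller; drivers without these callbacks return -ENOTSUP.
+ */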
#endif
int