ethdev: check RETA queue indices against number of queues
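
Validate RSS redirection table (RETA) entries against the number of RX
queues actually configured on the port, rather than only against
ETH_RSS_RETA_MAX_QUEUE, so that a redirection table can never point at
a queue the application did not set up.

The diff below also carries a few related ethdev fixes: the rx/tx
queue_release ops are now required only on reconfiguration (nothing
has to be released on first-time setup), rte_eth_dev_info_get() zeroes
the offload capability fields before calling into the driver so PMDs
that do not set them leave no stale values, the mempool private area
is obtained through rte_mempool_get_priv() instead of open-coded
pointer arithmetic, and several PMD_DEBUG_TRACE format specifiers are
corrected to match their argument types.
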
diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c
index 5ee2030..473c98b 100644
--- a/lib/librte_ether/rte_ethdev.c
+++ b/lib/librte_ether/rte_ethdev.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  * 
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
  * 
  *   Redistribution and use in source and binary forms, with or without
@@ -261,9 +261,7 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
        void **rxq;
        unsigned i;
 
-       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
-       if (dev->data->rx_queues == NULL) {
+       if (dev->data->rx_queues == NULL) { /* first time configuration */
                dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
                                sizeof(dev->data->rx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
@@ -271,7 +269,9 @@ rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
                        dev->data->nb_rx_queues = 0;
                        return -(ENOMEM);
                }
-       } else {
+       } else { /* re-configure */
+               FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
                rxq = dev->data->rx_queues;
 
                for (i = nb_queues; i < old_nb_queues; i++)
@@ -299,9 +299,7 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
        void **txq;
        unsigned i;
 
-       FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
-       if (dev->data->tx_queues == NULL) {
+       if (dev->data->tx_queues == NULL) { /* first time configuration */
                dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
                                sizeof(dev->data->tx_queues[0]) * nb_queues,
                                CACHE_LINE_SIZE);
@@ -309,7 +307,9 @@ rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
                        dev->data->nb_tx_queues = 0;
                        return -(ENOMEM);
                }
-       } else {
+       } else { /* re-configure */
+               FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
                txq = dev->data->tx_queues;
 
                for (i = nb_queues; i < old_nb_queues; i++)
@@ -343,9 +343,10 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                    (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
                    (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
                        /* SRIOV only works in VMDq enable mode */
-                       PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-                                       "wrong VMDQ mq_mode rx %d tx %d\n", 
-                                       port_id, dev_conf->rxmode.mq_mode,
+                       PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
+                                       "wrong VMDQ mq_mode rx %u tx %u\n", 
+                                       port_id,
+                                       dev_conf->rxmode.mq_mode,
                                        dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                }
@@ -355,8 +356,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                case ETH_MQ_RX_VMDQ_DCB:
                case ETH_MQ_RX_VMDQ_DCB_RSS:
                        /* DCB/RSS VMDQ in SRIOV mode, not implement yet */
-                       PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-                                       "unsupported VMDQ mq_mode rx %d\n", 
+                       PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
+                                       "unsupported VMDQ mq_mode rx %u\n", 
                                        port_id, dev_conf->rxmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
@@ -370,8 +371,8 @@ rte_eth_dev_check_mq_mode(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                switch (dev_conf->txmode.mq_mode) {
                case ETH_MQ_TX_VMDQ_DCB:
                        /* DCB VMDQ in SRIOV mode, not implement yet */
-                       PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
-                                       "unsupported VMDQ mq_mode tx %d\n", 
+                       PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
+                                       "unsupported VMDQ mq_mode tx %u\n", 
                                        port_id, dev_conf->txmode.mq_mode);
                        return (-EINVAL);
                default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
@@ -756,8 +757,7 @@ rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
                                (int) sizeof(struct rte_pktmbuf_pool_private));
                return (-ENOSPC);
        }
-       mbp_priv = (struct rte_pktmbuf_pool_private *)
-               ((char *)mp + sizeof(struct rte_mempool));
+       mbp_priv = rte_mempool_get_priv(mp);
        if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
            dev_info.min_rx_bufsize) {
                PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
@@ -1034,6 +1034,10 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info)
        }
        dev = &rte_eth_devices[port_id];
 
+       /* Default device offload capabilities to zero */
+       dev_info->rx_offload_capa = 0;
+       dev_info->tx_offload_capa = 0;
+       dev_info->if_index = 0;
        FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
        (*dev->dev_ops->dev_infos_get)(dev, dev_info);
        dev_info->pci_dev = dev->pci_dev;
@@ -1497,6 +1501,7 @@ int
 rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 {
        struct rte_eth_dev *dev;
+       uint16_t max_rxq;
        uint8_t i,j;
 
        if (port_id >= nb_ports) {
@@ -1510,10 +1515,13 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
                return (-EINVAL);
        }
 
+       dev = &rte_eth_devices[port_id];
+       max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
+               dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
        if (reta_conf->mask_lo != 0) {
                for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
                        if ((reta_conf->mask_lo & (1ULL << i)) &&
-                               (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
+                               (reta_conf->reta[i] >= max_rxq)) {
                                PMD_DEBUG_TRACE("RETA hash index output"
                                        "configration for port=%d,invalid"
                                        "queue=%d\n",port_id,reta_conf->reta[i]);
@@ -1529,7 +1537,7 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
 
                        /* Check if the max entry >= 128 */
                        if ((reta_conf->mask_hi & (1ULL << i)) && 
-                               (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
+                               (reta_conf->reta[j] >= max_rxq)) {
                                PMD_DEBUG_TRACE("RETA hash index output"
                                        "configration for port=%d,invalid"
                                        "queue=%d\n",port_id,reta_conf->reta[j]);
@@ -1539,8 +1547,6 @@ rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
                }
        }
 
-       dev = &rte_eth_devices[port_id];
-
        FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
        return (*dev->dev_ops->reta_update)(dev, reta_conf);
 }
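
For illustration, a minimal sketch (not part of the patch) of a RETA
setup that satisfies the new check. setup_reta() and its nb_rx_queues
parameter are hypothetical; the mask_lo/mask_hi/reta[] fields,
ETH_RSS_RETA_NUM_ENTRIES and rte_eth_dev_rss_reta_update() are the API
touched above. Every index written stays below the configured queue
count, which is exactly what the update now enforces:

    #include <string.h>
    #include <rte_ethdev.h>

    /* Illustrative only: assumes port_id was configured with
     * nb_rx_queues RX queues via rte_eth_dev_configure(). */
    static int
    setup_reta(uint8_t port_id, uint16_t nb_rx_queues)
    {
            struct rte_eth_rss_reta reta_conf;
            unsigned i;

            memset(&reta_conf, 0, sizeof(reta_conf));
            reta_conf.mask_lo = ~0ULL;      /* update entries 0..63 */
            reta_conf.mask_hi = ~0ULL;      /* update entries 64..127 */

            /* Spread the RETA entries round-robin over the RX queues,
             * so every index stays below nb_rx_queues rather than
             * merely below ETH_RSS_RETA_MAX_QUEUE. */
            for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
                    reta_conf.reta[i] = (uint8_t)(i % nb_rx_queues);

            return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
    }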