ethdev: fix RSS update when RSS is disabled
[dpdk.git] / lib / ethdev / ethdev_private.c
index c905c2d..48090c8 100644
@@ -2,10 +2,23 @@
  * Copyright(c) 2018 Gaëtan Rivet
  */
 
+#include <rte_debug.h>
+
 #include "rte_ethdev.h"
 #include "ethdev_driver.h"
 #include "ethdev_private.h"
 
+static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
+
+/* Shared memory between primary and secondary processes. */
+struct eth_dev_shared *eth_dev_shared_data;
+
+/* spinlock for shared data allocation */
+static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
+
+/* spinlock for eth device callbacks */
+rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
+
 uint16_t
 eth_dev_to_id(const struct rte_eth_dev *dev)
 {
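
The eth_dev_shared_data pointer declared above refers to the process-shared area set up later in this file. A rough sketch of the layout implied by the fields used here (next_owner_id, ownership_lock, per-port data); the authoritative definition lives in ethdev_driver.h and may carry more members:

    /* Sketch only -- not the canonical definition. */
    struct eth_dev_shared {
        uint64_t next_owner_id;                         /* next port owner id to hand out */
        rte_spinlock_t ownership_lock;                  /* protects ownership changes */
        struct rte_eth_dev_data data[RTE_MAX_ETHPORTS]; /* per-port data shared with secondaries */
    };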
@@ -175,22 +188,58 @@ done:
        return str == NULL ? -1 : 0;
 }
 
+struct dummy_queue {
+       bool rx_warn_once;
+       bool tx_warn_once;
+};
+static struct dummy_queue *dummy_queues_array[RTE_MAX_ETHPORTS][RTE_MAX_QUEUES_PER_PORT];
+static struct dummy_queue per_port_queues[RTE_MAX_ETHPORTS];
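+/*
+ * Point every queue slot of every port at that port's single per-port dummy
+ * queue, so the dummy burst handlers can recover the port id from the queue
+ * pointer and warn only once per port.
+ */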
+RTE_INIT(dummy_queue_init)
+{
+       uint16_t port_id;
+
+       for (port_id = 0; port_id < RTE_DIM(per_port_queues); port_id++) {
+               unsigned int q;
+
+               for (q = 0; q < RTE_DIM(dummy_queues_array[port_id]); q++)
+                       dummy_queues_array[port_id][q] = &per_port_queues[port_id];
+       }
+}
+
 static uint16_t
-dummy_eth_rx_burst(__rte_unused void *rxq,
+dummy_eth_rx_burst(void *rxq,
                __rte_unused struct rte_mbuf **rx_pkts,
                __rte_unused uint16_t nb_pkts)
 {
-       RTE_ETHDEV_LOG(ERR, "rx_pkt_burst for not ready port\n");
+       struct dummy_queue *queue = rxq;
+       uintptr_t port_id;
+
+       port_id = queue - per_port_queues;
+       if (port_id < RTE_DIM(per_port_queues) && !queue->rx_warn_once) {
+               RTE_ETHDEV_LOG(ERR, "lcore %u called rx_pkt_burst for not ready port %"PRIuPTR"\n",
+                       rte_lcore_id(), port_id);
+               rte_dump_stack();
+               queue->rx_warn_once = true;
+       }
        rte_errno = ENOTSUP;
        return 0;
 }
 
 static uint16_t
-dummy_eth_tx_burst(__rte_unused void *txq,
+dummy_eth_tx_burst(void *txq,
                __rte_unused struct rte_mbuf **tx_pkts,
                __rte_unused uint16_t nb_pkts)
 {
-       RTE_ETHDEV_LOG(ERR, "tx_pkt_burst for not ready port\n");
+       struct dummy_queue *queue = txq;
+       uintptr_t port_id;
+
+       port_id = queue - per_port_queues;
+       if (port_id < RTE_DIM(per_port_queues) && !queue->tx_warn_once) {
+               RTE_ETHDEV_LOG(ERR, "lcore %u called tx_pkt_burst for not ready port %"PRIuPTR"\n",
+                       rte_lcore_id(), port_id);
+               rte_dump_stack();
+               queue->tx_warn_once = true;
+       }
        rte_errno = ENOTSUP;
        return 0;
 }
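
The dummy handlers are what an application hits if it polls a port whose fast-path ops were reset. A minimal caller-side sketch, not part of this patch; `port_id` is assumed to be a valid but not-ready port:

    /* Rx burst on a not-ready port returns 0 packets and sets rte_errno. */
    struct rte_mbuf *pkts[32];
    uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));

    if (nb == 0 && rte_errno == ENOTSUP)
        printf("port %u is not ready for Rx\n", port_id);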
@@ -199,14 +248,22 @@ void
 eth_dev_fp_ops_reset(struct rte_eth_fp_ops *fpo)
 {
        static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
-       static const struct rte_eth_fp_ops dummy_ops = {
+       uintptr_t port_id = fpo - rte_eth_fp_ops;
+
+       per_port_queues[port_id].rx_warn_once = false;
+       per_port_queues[port_id].tx_warn_once = false;
+       *fpo = (struct rte_eth_fp_ops) {
                .rx_pkt_burst = dummy_eth_rx_burst,
                .tx_pkt_burst = dummy_eth_tx_burst,
-               .rxq = {.data = dummy_data, .clbk = dummy_data,},
-               .txq = {.data = dummy_data, .clbk = dummy_data,},
+               .rxq = {
+                       .data = (void **)&dummy_queues_array[port_id],
+                       .clbk = dummy_data,
+               },
+               .txq = {
+                       .data = (void **)&dummy_queues_array[port_id],
+                       .clbk = dummy_data,
+               },
        };
-
-       *fpo = dummy_ops;
 }
 
 void
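
On stop or release, a port's fast-path ops are restored by passing its slot in the global fast-path array; a hedged sketch of that caller side (the exact call site lives in rte_ethdev.c and may differ):

    /* Reset port_id's fast-path ops back to the dummy handlers; the slot
     * address is what lets the function recover port_id via pointer
     * arithmetic (fpo - rte_eth_fp_ops). */
    eth_dev_fp_ops_reset(rte_eth_fp_ops + port_id);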
@@ -257,3 +314,121 @@ rte_eth_call_tx_callbacks(uint16_t port_id, uint16_t queue_id,
 
        return nb_pkts;
 }
+
+void
+eth_dev_shared_data_prepare(void)
+{
+       const unsigned int flags = 0;
+       const struct rte_memzone *mz;
+
+       rte_spinlock_lock(&eth_dev_shared_data_lock);
+
+       if (eth_dev_shared_data == NULL) {
+               if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+                       /* Allocate port data and ownership shared memory. */
+                       mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
+                                       sizeof(*eth_dev_shared_data),
+                                       rte_socket_id(), flags);
+               } else
+                       mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
+               if (mz == NULL)
+                       rte_panic("Cannot allocate ethdev shared data\n");
+
+               eth_dev_shared_data = mz->addr;
+               if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+                       eth_dev_shared_data->next_owner_id =
+                                       RTE_ETH_DEV_NO_OWNER + 1;
+                       rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
+                       memset(eth_dev_shared_data->data, 0,
+                              sizeof(eth_dev_shared_data->data));
+               }
+       }
+
+       rte_spinlock_unlock(&eth_dev_shared_data_lock);
+}
+
+void
+eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       void **rxq = dev->data->rx_queues;
+
+       if (rxq[qid] == NULL)
+               return;
+
+       if (dev->dev_ops->rx_queue_release != NULL)
+               (*dev->dev_ops->rx_queue_release)(dev, qid);
+       rxq[qid] = NULL;
+}
+
+void
+eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       void **txq = dev->data->tx_queues;
+
+       if (txq[qid] == NULL)
+               return;
+
+       if (dev->dev_ops->tx_queue_release != NULL)
+               (*dev->dev_ops->tx_queue_release)(dev, qid);
+       txq[qid] = NULL;
+}
+
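+/*
+ * Size the Rx queue pointer array for the port: allocate it on first
+ * configuration, release the queues dropped by a re-configuration, and free
+ * the whole array when nb_queues is 0.
+ */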
+int
+eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t old_nb_queues = dev->data->nb_rx_queues;
+       unsigned int i;
+
+       if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
+               dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+                               sizeof(dev->data->rx_queues[0]) *
+                               RTE_MAX_QUEUES_PER_PORT,
+                               RTE_CACHE_LINE_SIZE);
+               if (dev->data->rx_queues == NULL) {
+                       dev->data->nb_rx_queues = 0;
+                       return -(ENOMEM);
+               }
+       } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
+               for (i = nb_queues; i < old_nb_queues; i++)
+                       eth_dev_rxq_release(dev, i);
+
+       } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
+               for (i = nb_queues; i < old_nb_queues; i++)
+                       eth_dev_rxq_release(dev, i);
+
+               rte_free(dev->data->rx_queues);
+               dev->data->rx_queues = NULL;
+       }
+       dev->data->nb_rx_queues = nb_queues;
+       return 0;
+}
+
+int
+eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+       uint16_t old_nb_queues = dev->data->nb_tx_queues;
+       unsigned int i;
+
+       if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
+               dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+                               sizeof(dev->data->tx_queues[0]) *
+                               RTE_MAX_QUEUES_PER_PORT,
+                               RTE_CACHE_LINE_SIZE);
+               if (dev->data->tx_queues == NULL) {
+                       dev->data->nb_tx_queues = 0;
+                       return -(ENOMEM);
+               }
+       } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
+               for (i = nb_queues; i < old_nb_queues; i++)
+                       eth_dev_txq_release(dev, i);
+
+       } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
+               for (i = nb_queues; i < old_nb_queues; i++)
+                       eth_dev_txq_release(dev, i);
+
+               rte_free(dev->data->tx_queues);
+               dev->data->tx_queues = NULL;
+       }
+       dev->data->nb_tx_queues = nb_queues;
+       return 0;
+}
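
A hedged sketch of how these helpers are typically driven from a configure/close path; `dev`, `nb_rx` and `nb_tx` are illustrative names, not identifiers from this patch:

    int ret;

    /* Grow or shrink the queue pointer arrays to the requested counts. */
    ret = eth_dev_rx_queue_config(dev, nb_rx);
    if (ret != 0)
        return ret;

    ret = eth_dev_tx_queue_config(dev, nb_tx);
    if (ret != 0) {
        /* Roll back the Rx allocation on failure. */
        eth_dev_rx_queue_config(dev, 0);
        return ret;
    }

    /* At close time, passing 0 releases every queue and frees both arrays. */
    eth_dev_rx_queue_config(dev, 0);
    eth_dev_tx_queue_config(dev, 0);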