net/mlx4: fix Rx interrupts management
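
This change hooks Rx queue completion channels into the ethdev Rx interrupt
API. For context, here is a minimal application-side sketch (not part of the
patch) of how the new rx_queue_intr_enable/disable callbacks are typically
driven; port_id, queue_id and the wait_for_rx() helper are illustrative
placeholders, and the port is assumed to have been configured with
dev_conf.intr_conf.rxq = 1 and started, roughly as examples/l3fwd-power does:

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    static int
    wait_for_rx(uint8_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event event;
            int ret;

            /* Add the Rx queue interrupt to the per-thread epoll set. */
            ret = rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                            RTE_EPOLL_PER_THREAD,
                                            RTE_INTR_EVENT_ADD, NULL);
            if (ret)
                    return ret;
            /* Arm the interrupt; ends up in mlx4_rx_intr_enable() below. */
            ret = rte_eth_dev_rx_intr_enable(port_id, queue_id);
            if (ret)
                    return ret;
            /* Sleep until traffic arrives, then disarm and resume polling. */
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
            return rte_eth_dev_rx_intr_disable(port_id, queue_id);
    }

The interrupt must be re-armed through rte_eth_dev_rx_intr_enable() before
each wait; mlx4_rx_intr_disable() acknowledges a single CQ event per wakeup.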
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 0fdba80..ae4be9e 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -75,6 +75,7 @@
 #include <rte_memory.h>
 #include <rte_flow.h>
 #include <rte_kvargs.h>
+#include <rte_interrupts.h>
 
 /* Generated configuration header. */
 #include "mlx4_autoconf.h"
@@ -127,6 +128,18 @@ const char *pmd_mlx4_init_params[] = {
        NULL,
 };
 
+static int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx);
+
+static int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx);
+
+static int
+priv_rx_intr_vec_enable(struct priv *priv);
+
+static void
+priv_rx_intr_vec_disable(struct priv *priv);
+
 /**
  * Check if running as a secondary process.
  *
@@ -1098,7 +1111,7 @@ static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start,
 
 /* For best performance, this function should not be inlined. */
 static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *)
-       __attribute__((noinline));
+       __rte_noinline;
 
 /**
  * Register mempool as a memory region.
@@ -2756,6 +2769,8 @@ rxq_cleanup(struct rxq *rxq)
        }
        if (rxq->cq != NULL)
                claim_zero(ibv_destroy_cq(rxq->cq));
+       if (rxq->channel != NULL)
+               claim_zero(ibv_destroy_comp_channel(rxq->channel));
        if (rxq->rd != NULL) {
                struct ibv_exp_destroy_res_domain_attr attr = {
                        .comp_mask = 0,
@@ -3696,11 +3711,22 @@ skip_mr:
                      (void *)dev, strerror(ret));
                goto error;
        }
+       if (dev->data->dev_conf.intr_conf.rxq) {
+               tmpl.channel = ibv_create_comp_channel(priv->ctx);
+               if (tmpl.channel == NULL) {
+                       ret = ENOMEM;
+                       ERROR("%p: Rx interrupt completion channel creation"
+                             " failure: %s",
+                             (void *)dev, strerror(ret));
+                       goto error;
+               }
+       }
        attr.cq = (struct ibv_exp_cq_init_attr){
                .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
                .res_domain = tmpl.rd,
        };
-       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, tmpl.channel, 0,
+                                   &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
@@ -4005,6 +4031,12 @@ mlx4_dev_start(struct rte_eth_dev *dev)
                     (void *)dev);
                goto err;
        }
+       ret = priv_rx_intr_vec_enable(priv);
+       if (ret) {
+               ERROR("%p: Rx interrupt vector creation failed",
+                     (void *)dev);
+               goto err;
+       }
        ret = mlx4_priv_flow_start(priv);
        if (ret) {
                ERROR("%p: flow start failed: %s",
@@ -4197,6 +4229,7 @@ mlx4_dev_close(struct rte_eth_dev *dev)
                assert(priv->ctx == NULL);
        priv_dev_removal_interrupt_handler_uninstall(priv, dev);
        priv_dev_link_interrupt_handler_uninstall(priv, dev);
+       priv_rx_intr_vec_disable(priv);
        priv_unlock(priv);
        memset(priv, 0, sizeof(*priv));
 }
@@ -4300,7 +4333,7 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        unsigned int max;
        char ifname[IF_NAMESIZE];
 
-       info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+       info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (priv == NULL)
                return;
@@ -4503,26 +4536,30 @@ end:
  * @param vmdq
  *   VMDq pool index to associate address with (ignored).
  */
-static void
+static int
 mlx4_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                  uint32_t index, uint32_t vmdq)
 {
        struct priv *priv = dev->data->dev_private;
+       int re;
 
        if (mlx4_is_secondary())
-               return;
+               return -ENOTSUP;
        (void)vmdq;
        priv_lock(priv);
        DEBUG("%p: adding MAC address at index %" PRIu32,
              (void *)dev, index);
        /* Last array entry is reserved for broadcast. */
-       if (index >= (elemof(priv->mac) - 1))
+       if (index >= (elemof(priv->mac) - 1)) {
+               re = EINVAL;
                goto end;
-       priv_mac_addr_add(priv, index,
-                         (const uint8_t (*)[ETHER_ADDR_LEN])
-                         mac_addr->addr_bytes);
+       }
+       re = priv_mac_addr_add(priv, index,
+                              (const uint8_t (*)[ETHER_ADDR_LEN])
+                              mac_addr->addr_bytes);
 end:
        priv_unlock(priv);
+       return -re;
 }
 
 /**
@@ -5153,6 +5190,8 @@ static const struct eth_dev_ops mlx4_dev_ops = {
        .mac_addr_set = mlx4_mac_addr_set,
        .mtu_set = mlx4_dev_set_mtu,
        .filter_ctrl = mlx4_dev_filter_ctrl,
+       .rx_queue_intr_enable = mlx4_rx_intr_enable,
+       .rx_queue_intr_disable = mlx4_rx_intr_disable,
 };
 
 /**
@@ -5192,7 +5231,7 @@ mlx4_ibv_device_to_pci_addr(const struct ibv_device *device,
                /* Extract information. */
                if (sscanf(line,
                           "PCI_SLOT_NAME="
-                          "%" SCNx16 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
+                          "%" SCNx32 ":%" SCNx8 ":%" SCNx8 ".%" SCNx8 "\n",
                           &pci_addr->domain,
                           &pci_addr->bus,
                           &pci_addr->devid,
@@ -5364,7 +5403,8 @@ mlx4_dev_link_status_handler(void *arg)
        ret = priv_dev_status_handler(priv, dev, &events);
        priv_unlock(priv);
        if (ret > 0 && events & (1 << RTE_ETH_EVENT_INTR_LSC))
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL,
+                                             NULL);
 }
 
 /**
@@ -5393,7 +5433,8 @@ mlx4_dev_interrupt_handler(void *cb_arg)
                     i++) {
                        if (ev & (1 << i)) {
                                ev &= ~(1 << i);
-                               _rte_eth_dev_callback_process(dev, i, NULL);
+                               _rte_eth_dev_callback_process(dev, i, NULL,
+                                                             NULL);
                                ret--;
                        }
                }
@@ -5587,6 +5628,154 @@ priv_dev_removal_interrupt_handler_install(struct priv *priv,
        return 0;
 }
 
+/**
+ * Allocate queue vector and fill epoll fd list for Rx interrupts.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ *
+ * @return
+ *   0 on success, negative on failure.
+ */
+static int
+priv_rx_intr_vec_enable(struct priv *priv)
+{
+       unsigned int i;
+       unsigned int rxqs_n = priv->rxqs_n;
+       unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+       unsigned int count = 0;
+       struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+       if (!priv->dev->data->dev_conf.intr_conf.rxq)
+               return 0;
+       priv_rx_intr_vec_disable(priv);
+       intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
+       if (intr_handle->intr_vec == NULL) {
+               ERROR("failed to allocate memory for interrupt vector,"
+                     " Rx interrupts will not be supported");
+               return -ENOMEM;
+       }
+       intr_handle->type = RTE_INTR_HANDLE_EXT;
+       for (i = 0; i != n; ++i) {
+               struct rxq *rxq = (*priv->rxqs)[i];
+               int fd;
+               int flags;
+               int rc;
+
+               /* Skip queues that cannot request interrupts. */
+               if (!rxq || !rxq->channel) {
+                       /* Use invalid intr_vec[] index to disable entry. */
+                       intr_handle->intr_vec[i] =
+                               RTE_INTR_VEC_RXTX_OFFSET +
+                               RTE_MAX_RXTX_INTR_VEC_ID;
+                       continue;
+               }
+               if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
+                       ERROR("too many Rx queues for interrupt vector size"
+                             " (%d), Rx interrupts cannot be enabled",
+                             RTE_MAX_RXTX_INTR_VEC_ID);
+                       priv_rx_intr_vec_disable(priv);
+                       return -1;
+               }
+               fd = rxq->channel->fd;
+               flags = fcntl(fd, F_GETFL);
+               rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+               if (rc < 0) {
+                       ERROR("failed to make Rx interrupt file descriptor"
+                             " %d non-blocking for queue index %u", fd, i);
+                       priv_rx_intr_vec_disable(priv);
+                       return rc;
+               }
+               intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
+               intr_handle->efds[count] = fd;
+               count++;
+       }
+       if (!count)
+               priv_rx_intr_vec_disable(priv);
+       else
+               intr_handle->nb_efd = count;
+       return 0;
+}
+
+/**
+ * Clean up Rx interrupts handler.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ */
+static void
+priv_rx_intr_vec_disable(struct priv *priv)
+{
+       struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
+
+       rte_intr_free_epoll_fd(intr_handle);
+       free(intr_handle->intr_vec);
+       intr_handle->nb_efd = 0;
+       intr_handle->intr_vec = NULL;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt enable.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   0 on success, negative on failure.
+ */
+static int
+mlx4_rx_intr_enable(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct rxq *rxq = (*priv->rxqs)[idx];
+       int ret;
+
+       if (!rxq || !rxq->channel)
+               ret = EINVAL;
+       else
+               ret = ibv_req_notify_cq(rxq->cq, 0);
+       if (ret)
+               WARN("unable to arm interrupt on rx queue %d", idx);
+       return -ret;
+}
+
+/**
+ * DPDK callback for Rx queue interrupt disable.
+ *
+ * @param dev
+ *   Pointer to Ethernet device structure.
+ * @param idx
+ *   Rx queue index.
+ *
+ * @return
+ *   0 on success, negative on failure.
+ */
+static int
+mlx4_rx_intr_disable(struct rte_eth_dev *dev, uint16_t idx)
+{
+       struct priv *priv = dev->data->dev_private;
+       struct rxq *rxq = (*priv->rxqs)[idx];
+       struct ibv_cq *ev_cq;
+       void *ev_ctx;
+       int ret;
+
+       if (!rxq || !rxq->channel) {
+               ret = EINVAL;
+       } else {
+               ret = ibv_get_cq_event(rxq->cq->channel, &ev_cq, &ev_ctx);
+               if (ret || ev_cq != rxq->cq)
+                       ret = EINVAL;
+       }
+       if (ret)
+               WARN("unable to disable interrupt on rx queue %d",
+                    idx);
+       else
+               ibv_ack_cq_events(rxq->cq, 1);
+       return -ret;
+}
+
 /**
  * Verify and store value for device argument.
  *
@@ -5998,6 +6187,15 @@ mlx4_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 
                eth_dev->device->driver = &mlx4_driver.driver;
 
+               /*
+                * Copy and override interrupt handle to prevent it from
+                * being shared between all ethdev instances of a given PCI
+                * device. This is required to properly handle Rx interrupts
+                * on all ports.
+                */
+               priv->intr_handle_dev = *eth_dev->intr_handle;
+               eth_dev->intr_handle = &priv->intr_handle_dev;
+
                priv->dev = eth_dev;
                eth_dev->dev_ops = &mlx4_dev_ops;
 
@@ -6086,7 +6284,7 @@ rte_mlx4_pmd_init(void)
         */
        setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
        ibv_fork_init();
-       rte_eal_pci_register(&mlx4_driver);
+       rte_pci_register(&mlx4_driver);
 }
 
 RTE_PMD_EXPORT_NAME(net_mlx4, __COUNTER__);