drivers: remove direct access to interrupt handle
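
The memif hunks below stop touching the fields of a driver-owned struct
rte_intr_handle directly. Each queue instead allocates an opaque interrupt
instance at setup time, reaches its file descriptor and type only through the
rte_intr_*() accessors, and frees the instance in the new per-queue release
callbacks. A minimal sketch of that lifecycle, using only calls that appear in
the diff (the standalone helper and its name are illustrative, not part of the
driver):

	#include <unistd.h>
	#include <sys/eventfd.h>
	#include <rte_interrupts.h>

	/* Illustrative sketch: create a per-queue interrupt handle backed
	 * by an eventfd, mirroring what the queue setup paths below do.
	 */
	static struct rte_intr_handle *
	queue_intr_handle_create(void)
	{
		struct rte_intr_handle *handle;
		int fd;

		/* The handle is now an opaque, shared instance. */
		handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
		if (handle == NULL)
			return NULL;

		fd = eventfd(0, EFD_NONBLOCK);
		if (fd < 0)
			goto error;

		/* Fields are only set through accessors, which return
		 * non-zero and set rte_errno on failure.
		 */
		if (rte_intr_fd_set(handle, fd) ||
		    rte_intr_type_set(handle, RTE_INTR_HANDLE_EXT))
			goto error;

		return handle;

	error:
		if (fd >= 0)
			close(fd);
		rte_intr_instance_free(handle);
		return NULL;
	}

	/* Reads of the fd then go through rte_intr_fd_get(handle), and
	 * rte_intr_instance_free(handle) releases the instance when the
	 * queue is torn down.
	 */
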
diff --git a/drivers/net/memif/rte_eth_memif.c b/drivers/net/memif/rte_eth_memif.c
index da71957..8cec493 100644
--- a/drivers/net/memif/rte_eth_memif.c
+++ b/drivers/net/memif/rte_eth_memif.c
@@ -55,10 +55,10 @@ static const char * const valid_arguments[] = {
 };
 
 static const struct rte_eth_link pmd_link = {
-       .link_speed = ETH_SPEED_NUM_10G,
-       .link_duplex = ETH_LINK_FULL_DUPLEX,
-       .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_AUTONEG
+       .link_speed = RTE_ETH_SPEED_NUM_10G,
+       .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+       .link_status = RTE_ETH_LINK_DOWN,
+       .link_autoneg = RTE_ETH_LINK_AUTONEG
 };
 
 #define MEMIF_MP_SEND_REGION           "memif_mp_send_region"
@@ -199,6 +199,7 @@ memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *de
        dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
        dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
        dev_info->min_rx_bufsize = 0;
+       dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
        return 0;
 }
@@ -325,7 +326,8 @@ eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
        /* consume interrupt */
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0)
-               size = read(mq->intr_handle.fd, &b, sizeof(b));
+               size = read(rte_intr_fd_get(mq->intr_handle), &b,
+                           sizeof(b));
 
        ring_size = 1 << mq->log2_ring_size;
        mask = ring_size - 1;
@@ -383,9 +385,10 @@ next_slot:
                        if (mbuf != mbuf_head)
                                rte_pktmbuf_pkt_len(mbuf_head) += cp_len;
 
-                       memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, dst_off),
-                              (uint8_t *)memif_get_buffer(proc_private, d0) + src_off,
-                              cp_len);
+                       rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *,
+                                                          dst_off),
+                               (uint8_t *)memif_get_buffer(proc_private, d0) +
+                               src_off, cp_len);
 
                        src_off += cp_len;
                        dst_off += cp_len;
@@ -460,7 +463,8 @@ eth_memif_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
                uint64_t b;
                ssize_t size __rte_unused;
-               size = read(mq->intr_handle.fd, &b, sizeof(b));
+               size = read(rte_intr_fd_get(mq->intr_handle), &b,
+                           sizeof(b));
        }
 
        ring_size = 1 << mq->log2_ring_size;
@@ -566,7 +570,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                rte_eth_devices[mq->in_port].process_private;
        memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
        uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0;
-       uint16_t src_len, src_off, dst_len, dst_off, cp_len;
+       uint16_t src_len, src_off, dst_len, dst_off, cp_len, nb_segs;
        memif_ring_type_t type = mq->type;
        memif_desc_t *d0;
        struct rte_mbuf *mbuf;
@@ -614,6 +618,7 @@ eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
        while (n_tx_pkts < nb_pkts && n_free) {
                mbuf_head = *bufs++;
+               nb_segs = mbuf_head->nb_segs;
                mbuf = mbuf_head;
 
                saved_slot = slot;
@@ -644,9 +649,10 @@ next_in_chain:
                        }
                        cp_len = RTE_MIN(dst_len, src_len);
 
-                       memcpy((uint8_t *)memif_get_buffer(proc_private, d0) + dst_off,
-                              rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
-                              cp_len);
+                       rte_memcpy((uint8_t *)memif_get_buffer(proc_private,
+                                                              d0) + dst_off,
+                               rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
+                               cp_len);
 
                        mq->n_bytes += cp_len;
                        src_off += cp_len;
@@ -657,7 +663,7 @@ next_in_chain:
                        d0->length = dst_off;
                }
 
-               if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
+               if (--nb_segs > 0) {
                        mbuf = mbuf->next;
                        goto next_in_chain;
                }
@@ -676,7 +682,8 @@ no_free_slots:
 
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
                a = 1;
-               size = write(mq->intr_handle.fd, &a, sizeof(a));
+               size = write(rte_intr_fd_get(mq->intr_handle), &a,
+                            sizeof(a));
                if (unlikely(size < 0)) {
                        MIF_LOG(WARNING,
                                "Failed to send interrupt. %s", strerror(errno));
@@ -694,6 +701,7 @@ memif_tx_one_zc(struct pmd_process_private *proc_private, struct memif_queue *mq
                uint16_t slot, uint16_t n_free)
 {
        memif_desc_t *d0;
+       uint16_t nb_segs = mbuf->nb_segs;
        int used_slots = 1;
 
 next_in_chain:
@@ -714,7 +722,7 @@ next_in_chain:
        d0->flags = 0;
 
        /* check if buffer is chained */
-       if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
+       if (--nb_segs > 0) {
                if (n_free < 2)
                        return 0;
                /* mark buffer as chained */
@@ -827,7 +835,8 @@ no_free_slots:
        /* Send interrupt, if enabled. */
        if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
                uint64_t a = 1;
-               ssize_t size = write(mq->intr_handle.fd, &a, sizeof(a));
+               ssize_t size = write(rte_intr_fd_get(mq->intr_handle),
+                                    &a, sizeof(a));
                if (unlikely(size < 0)) {
                        MIF_LOG(WARNING,
                                "Failed to send interrupt. %s", strerror(errno));
@@ -1087,8 +1096,10 @@ memif_init_queues(struct rte_eth_dev *dev)
                mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_C2S, i);
                mq->last_head = 0;
                mq->last_tail = 0;
-               mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
-               if (mq->intr_handle.fd < 0) {
+               if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))
+                       return -rte_errno;
+
+               if (rte_intr_fd_get(mq->intr_handle) < 0) {
                        MIF_LOG(WARNING,
                                "Failed to create eventfd for tx queue %d: %s.", i,
                                strerror(errno));
@@ -1110,8 +1121,9 @@ memif_init_queues(struct rte_eth_dev *dev)
                mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2C, i);
                mq->last_head = 0;
                mq->last_tail = 0;
-               mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
-               if (mq->intr_handle.fd < 0) {
+               if (rte_intr_fd_set(mq->intr_handle, eventfd(0, EFD_NONBLOCK)))
+                       return -rte_errno;
+               if (rte_intr_fd_get(mq->intr_handle) < 0) {
                        MIF_LOG(WARNING,
                                "Failed to create eventfd for rx queue %d: %s.", i,
                                strerror(errno));
@@ -1214,7 +1226,7 @@ memif_connect(struct rte_eth_dev *dev)
 
                pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
                pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
-               dev->data->dev_link.link_status = ETH_LINK_UP;
+               dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
        }
        MIF_LOG(INFO, "Connected.");
        return 0;
@@ -1253,9 +1265,9 @@ memif_dev_close(struct rte_eth_dev *dev)
                memif_disconnect(dev);
 
                for (i = 0; i < dev->data->nb_rx_queues; i++)
-                       (*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
+                       (*dev->dev_ops->rx_queue_release)(dev, i);
                for (i = 0; i < dev->data->nb_tx_queues; i++)
-                       (*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);
+                       (*dev->dev_ops->tx_queue_release)(dev, i);
 
                memif_socket_remove_device(dev);
        } else {
@@ -1305,12 +1317,24 @@ memif_tx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       /* Allocate interrupt instance */
+       mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+       if (mq->intr_handle == NULL) {
+               MIF_LOG(ERR, "Failed to allocate intr handle");
+               return -ENOMEM;
+       }
+
        mq->type =
            (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_C2S : MEMIF_RING_S2C;
        mq->n_pkts = 0;
        mq->n_bytes = 0;
-       mq->intr_handle.fd = -1;
-       mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
+
+       if (rte_intr_fd_set(mq->intr_handle, -1))
+               return -rte_errno;
+
+       if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))
+               return -rte_errno;
+
        mq->in_port = dev->data->port_id;
        dev->data->tx_queues[qid] = mq;
 
@@ -1334,11 +1358,23 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,
                return -ENOMEM;
        }
 
+       /* Allocate interrupt instance */
+       mq->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+       if (mq->intr_handle == NULL) {
+               MIF_LOG(ERR, "Failed to allocate intr handle");
+               return -ENOMEM;
+       }
+
        mq->type = (pmd->role == MEMIF_ROLE_CLIENT) ? MEMIF_RING_S2C : MEMIF_RING_C2S;
        mq->n_pkts = 0;
        mq->n_bytes = 0;
-       mq->intr_handle.fd = -1;
-       mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
+
+       if (rte_intr_fd_set(mq->intr_handle, -1))
+               return -rte_errno;
+
+       if (rte_intr_type_set(mq->intr_handle, RTE_INTR_HANDLE_EXT))
+               return -rte_errno;
+
        mq->mempool = mb_pool;
        mq->in_port = dev->data->port_id;
        dev->data->rx_queues[qid] = mq;
@@ -1347,9 +1383,21 @@ memif_rx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-memif_queue_release(void *queue)
+memif_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
+{
+       struct memif_queue *mq = dev->data->rx_queues[qid];
+
+       if (!mq)
+               return;
+
+       rte_intr_instance_free(mq->intr_handle);
+       rte_free(mq);
+}
+
+static void
+memif_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
 {
-       struct memif_queue *mq = (struct memif_queue *)queue;
+       struct memif_queue *mq = dev->data->tx_queues[qid];
 
        if (!mq)
                return;
@@ -1365,10 +1413,10 @@ memif_link_update(struct rte_eth_dev *dev,
 
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                proc_private = dev->process_private;
-               if (dev->data->dev_link.link_status == ETH_LINK_UP &&
+               if (dev->data->dev_link.link_status == RTE_ETH_LINK_UP &&
                                proc_private->regions_num == 0) {
                        memif_mp_request_regions(dev);
-               } else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
+               } else if (dev->data->dev_link.link_status == RTE_ETH_LINK_DOWN &&
                                proc_private->regions_num > 0) {
                        memif_free_regions(dev);
                }
@@ -1466,8 +1514,8 @@ static const struct eth_dev_ops ops = {
        .dev_configure = memif_dev_configure,
        .tx_queue_setup = memif_tx_queue_setup,
        .rx_queue_setup = memif_rx_queue_setup,
-       .rx_queue_release = memif_queue_release,
-       .tx_queue_release = memif_queue_release,
+       .rx_queue_release = memif_rx_queue_release,
+       .tx_queue_release = memif_tx_queue_release,
        .rx_queue_intr_enable = memif_rx_queue_intr_enable,
        .rx_queue_intr_disable = memif_rx_queue_intr_disable,
        .link_update = memif_link_update,