diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c
index 7d0ff1c..1396f32 100644
--- a/drivers/net/af_packet/rte_eth_af_packet.c
+++ b/drivers/net/af_packet/rte_eth_af_packet.c
@@ -8,8 +8,8 @@
 
 #include <rte_string_fns.h>
 #include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_vdev.h>
+#include <ethdev_driver.h>
+#include <ethdev_vdev.h>
 #include <rte_malloc.h>
 #include <rte_kvargs.h>
 #include <rte_bus_vdev.h>
@@ -48,6 +48,7 @@ struct pkt_rx_queue {
 
        struct rte_mempool *mb_pool;
        uint16_t in_port;
+       uint8_t vlan_strip;
 
        volatile unsigned long rx_pkts;
        volatile unsigned long rx_bytes;
@@ -78,6 +79,7 @@ struct pmd_internals {
 
        struct pkt_rx_queue *rx_queue;
        struct pkt_tx_queue *tx_queue;
+       uint8_t vlan_strip;
 };
 
 static const char *valid_arguments[] = {
@@ -91,13 +93,13 @@ static const char *valid_arguments[] = {
 };
 
 static struct rte_eth_link pmd_link = {
-       .link_speed = ETH_SPEED_NUM_10G,
-       .link_duplex = ETH_LINK_FULL_DUPLEX,
-       .link_status = ETH_LINK_DOWN,
-       .link_autoneg = ETH_LINK_FIXED,
+       .link_speed = RTE_ETH_SPEED_NUM_10G,
+       .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
+       .link_status = RTE_ETH_LINK_DOWN,
+       .link_autoneg = RTE_ETH_LINK_FIXED,
 };
 
-RTE_LOG_REGISTER(af_packet_logtype, pmd.net.packet, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(af_packet_logtype, NOTICE);
 
 #define PMD_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, af_packet_logtype, \
@@ -147,7 +149,10 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                /* check for vlan info */
                if (ppd->tp_status & TP_STATUS_VLAN_VALID) {
                        mbuf->vlan_tci = ppd->tp_vlan_tci;
-                       mbuf->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+                       mbuf->ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
+
+                       if (!pkt_q->vlan_strip && rte_vlan_insert(&mbuf))
+                               PMD_LOG(ERR, "Failed to reinsert VLAN tag");
                }
 
                /* release incoming frame and advance ring buffer */
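Note: with an AF_PACKET ring the kernel always strips the VLAN tag from the frame data and only reports the TCI out of band in tp_vlan_tci. The change above re-inserts the tag with rte_vlan_insert() whenever the application did not request RTE_ETH_RX_OFFLOAD_VLAN_STRIP, so the mbuf payload matches what was on the wire; on success rte_vlan_insert() also clears RTE_MBUF_F_RX_VLAN_STRIPPED. A minimal consumer-side sketch, illustrative only and not part of the patch:

    #include <stdio.h>
    #include <rte_mbuf.h>

    /* Sketch: distinguishing the two VLAN cases on a received mbuf. */
    static void show_vlan(const struct rte_mbuf *m)
    {
            if (m->ol_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
                    /* strip offload on: tag lives only in the metadata */
                    printf("stripped, TCI=%u\n", m->vlan_tci);
            else if (m->ol_flags & RTE_MBUF_F_RX_VLAN)
                    /* strip offload off: tag was re-inserted into the data */
                    printf("tag kept in frame, TCI=%u\n", m->vlan_tci);
    }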
@@ -167,6 +172,26 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        return num_rx;
 }
 
+/*
+ * Check if there is an available frame in the ring
+ */
+static inline bool
+tx_ring_status_available(uint32_t tp_status)
+{
+       /*
+        * We eliminate the timestamp status from the packet status.
+        * This should only matter if timestamping is enabled on the socket,
+        * but a kernel bug (fixed in newer releases) sets the bits regardless.
+        *
+        * See the following kernel commit for reference:
+        *     commit 171c3b151118a2fe0fc1e2a9d1b5a1570cfe82d2
+        *     net: packetmmap: fix only tx timestamp on request
+        */
+       tp_status &= ~(TP_STATUS_TS_SOFTWARE | TP_STATUS_TS_RAW_HARDWARE);
+
+       return tp_status == TP_STATUS_AVAILABLE;
+}
+
 /*
  * Callback to handle sending packets through a real NIC.
  */
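Note: the tx_ring_status_available() helper added above is needed because, on kernels without the referenced fix, a TX slot returned to user space can carry TP_STATUS_TS_SOFTWARE (or TP_STATUS_TS_RAW_HARDWARE) even though no timestamp was requested; a plain equality test against TP_STATUS_AVAILABLE then never succeeds and transmission stalls. A small illustration in the context of that helper, using the constants from <linux/if_packet.h> (the status value is only an example):

    #include <assert.h>
    #include <stdint.h>
    #include <linux/if_packet.h>

    static void timestamp_status_example(void)
    {
            /* what an affected kernel may report for a slot that is free */
            uint32_t status = TP_STATUS_AVAILABLE | TP_STATUS_TS_SOFTWARE;

            assert(status != TP_STATUS_AVAILABLE);    /* naive check: looks busy */
            assert(tx_ring_status_available(status)); /* masked check: free      */
    }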
@@ -204,7 +229,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                }
 
                /* insert vlan info if necessary */
-               if (mbuf->ol_flags & PKT_TX_VLAN_PKT) {
+               if (mbuf->ol_flags & RTE_MBUF_F_TX_VLAN) {
                        if (rte_vlan_insert(&mbuf)) {
                                rte_pktmbuf_free(mbuf);
                                continue;
@@ -212,8 +237,30 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                }
 
                /* point at the next incoming frame */
-               if ((ppd->tp_status != TP_STATUS_AVAILABLE) &&
-                   (poll(&pfd, 1, -1) < 0))
+               if (!tx_ring_status_available(ppd->tp_status)) {
+                       if (poll(&pfd, 1, -1) < 0)
+                               break;
+
+                       /* poll() can return POLLERR if the interface is down */
+                       if (pfd.revents & POLLERR)
+                               break;
+               }
+
+               /*
+                * poll() will almost always return POLLOUT, even if there
+                * are no extra buffers available
+                *
+                * This happens because packet_poll() calls datagram_poll(),
+                * which checks the space left in the socket buffer and,
+                * in the case of packet_mmap, the default socket buffer length
+                * doesn't match the requested size for the tx_ring.
+                * As such, there is almost always space left in the socket
+                * buffer, which doesn't seem to be correlated to the requested
+                * size for the tx_ring in packet_mmap.
+                *
+                * This results in poll() returning POLLOUT.
+                */
+               if (!tx_ring_status_available(ppd->tp_status))
                        break;
 
                /* copy the tx frame data */
@@ -265,14 +312,14 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 static int
 eth_dev_start(struct rte_eth_dev *dev)
 {
-       dev->data->dev_link.link_status = ETH_LINK_UP;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
        return 0;
 }
 
 /*
  * This function gets called when the current port gets stopped.
  */
-static void
+static int
 eth_dev_stop(struct rte_eth_dev *dev)
 {
        unsigned i;
@@ -295,12 +342,18 @@ eth_dev_stop(struct rte_eth_dev *dev)
                internals->tx_queue[i].sockfd = -1;
        }
 
-       dev->data->dev_link.link_status = ETH_LINK_DOWN;
+       dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
+       return 0;
 }
 
 static int
 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
 {
+       struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
+       const struct rte_eth_rxmode *rxmode = &dev_conf->rxmode;
+       struct pmd_internals *internals = dev->data->dev_private;
+
+       internals->vlan_strip = !!(rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
        return 0;
 }
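Note: eth_dev_configure() now records whether the application requested RX VLAN stripping, and eth_rx_queue_setup() further below copies the flag into each queue so the hot path only touches per-queue state. An application enables the behaviour through the usual offload negotiation; a hedged sketch (port_id and the queue counts are placeholders):

    #include <rte_ethdev.h>

    static int request_vlan_strip(uint16_t port_id)
    {
            struct rte_eth_dev_info info;
            struct rte_eth_conf conf = {0};

            if (rte_eth_dev_info_get(port_id, &info) != 0)
                    return -1;

            /* only request the offload if the PMD advertises it */
            if (info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                    conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }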
 
@@ -311,12 +364,13 @@ eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        dev_info->if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
-       dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
+       dev_info->max_rx_pktlen = RTE_ETHER_MAX_LEN;
        dev_info->max_rx_queues = (uint16_t)internals->nb_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_queues;
        dev_info->min_rx_bufsize = 0;
-       dev_info->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS |
-               DEV_TX_OFFLOAD_VLAN_INSERT;
+       dev_info->tx_offload_capa = RTE_ETH_TX_OFFLOAD_MULTI_SEGS |
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT;
+       dev_info->rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
 
        return 0;
 }
@@ -377,14 +431,33 @@ eth_stats_reset(struct rte_eth_dev *dev)
 }
 
 static int
-eth_dev_close(struct rte_eth_dev *dev __rte_unused)
+eth_dev_close(struct rte_eth_dev *dev)
 {
-       return 0;
-}
+       struct pmd_internals *internals;
+       struct tpacket_req *req;
+       unsigned int q;
 
-static void
-eth_queue_release(void *q __rte_unused)
-{
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+               return 0;
+
+       PMD_LOG(INFO, "Closing AF_PACKET ethdev on NUMA socket %u",
+               rte_socket_id());
+
+       internals = dev->data->dev_private;
+       req = &internals->req;
+       for (q = 0; q < internals->nb_queues; q++) {
+               munmap(internals->rx_queue[q].map,
+                       2 * req->tp_block_size * req->tp_block_nr);
+               rte_free(internals->rx_queue[q].rd);
+               rte_free(internals->tx_queue[q].rd);
+       }
+       free(internals->if_name);
+       rte_free(internals->rx_queue);
+       rte_free(internals->tx_queue);
+
+       /* mac_addrs must not be freed alone because part of dev_private */
+       dev->data->mac_addrs = NULL;
+       return 0;
 }
 
 static int
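Note: ring unmapping and queue freeing move from the vdev remove callback into eth_dev_close(), so the same cleanup runs whether the application closes the port or the device is removed; rte_pmd_af_packet_remove(), rewritten at the end of this diff, simply delegates to it. Application-side teardown stays the standard sequence; a minimal sketch, assuming a configured port_id:

    #include <rte_ethdev.h>

    static void shutdown_port(uint16_t port_id)
    {
            /* stop the port, then close it; close unmaps the packet_mmap
             * rings, frees the per-queue arrays and releases the ethdev */
            rte_eth_dev_stop(port_id);
            rte_eth_dev_close(port_id);
    }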
@@ -423,6 +496,7 @@ eth_rx_queue_setup(struct rte_eth_dev *dev,
 
        dev->data->rx_queues[rx_queue_id] = pkt_q;
        pkt_q->in_port = dev->data->port_id;
+       pkt_q->vlan_strip = internals->vlan_strip;
 
        return 0;
 }
@@ -549,8 +623,6 @@ static const struct eth_dev_ops ops = {
        .promiscuous_disable = eth_dev_promiscuous_disable,
        .rx_queue_setup = eth_rx_queue_setup,
        .tx_queue_setup = eth_tx_queue_setup,
-       .rx_queue_release = eth_queue_release,
-       .tx_queue_release = eth_queue_release,
        .link_update = eth_link_update,
        .stats_get = eth_stats_get,
        .stats_reset = eth_stats_reset,
@@ -723,18 +795,18 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
                        goto error;
                }
 
+               if (qdisc_bypass) {
 #if defined(PACKET_QDISC_BYPASS)
-               rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
-                               &qdisc_bypass, sizeof(qdisc_bypass));
-               if (rc == -1) {
-                       PMD_LOG_ERRNO(ERR,
-                               "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
-                               name, pair->value);
-                       goto error;
-               }
-#else
-               RTE_SET_USED(qdisc_bypass);
+                       rc = setsockopt(qsockfd, SOL_PACKET, PACKET_QDISC_BYPASS,
+                                       &qdisc_bypass, sizeof(qdisc_bypass));
+                       if (rc == -1) {
+                               PMD_LOG_ERRNO(ERR,
+                                       "%s: could not set PACKET_QDISC_BYPASS on AF_PACKET socket for %s",
+                                       name, pair->value);
+                               goto error;
+                       }
 #endif
+               }
 
                rc = setsockopt(qsockfd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
                if (rc == -1) {
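Note: the PACKET_QDISC_BYPASS socket option is now set only when qdisc_bypass is non-zero, instead of calling setsockopt() unconditionally with whatever value the devarg held; passing qdisc_bypass=0 leaves the socket untouched. Usage is unchanged, e.g. (interface name is a placeholder):

    --vdev=net_af_packet0,iface=eth0,qdisc_bypass=1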
@@ -835,6 +907,7 @@ rte_pmd_init_internals(struct rte_vdev_device *dev,
        data->nb_tx_queues = (uint16_t)nb_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &(*internals)->eth_addr;
+       data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
 
        (*eth_dev)->dev_ops = &ops;
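Note: RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS asks the ethdev layer to expose the basic per-queue counters this PMD already maintains as extended statistics, since the driver implements no xstats callbacks of its own. They are then visible through the regular xstats API; a rough retrieval sketch, with error handling trimmed:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Rough sketch: dump every xstat of a port. */
    static void dump_xstats(uint16_t port_id)
    {
            int n = rte_eth_xstats_get(port_id, NULL, 0);
            struct rte_eth_xstat xstats[n];
            struct rte_eth_xstat_name names[n];

            rte_eth_xstats_get(port_id, xstats, n);
            rte_eth_xstats_get_names(port_id, names, n);
            for (int i = 0; i < n; i++)
                    printf("%s: %" PRIu64 "\n", names[i].name, xstats[i].value);
    }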
 
@@ -1033,13 +1106,7 @@ exit:
 static int
 rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
 {
-       struct rte_eth_dev *eth_dev = NULL;
-       struct pmd_internals *internals;
-       struct tpacket_req *req;
-       unsigned q;
-
-       PMD_LOG(INFO, "Closing AF_PACKET ethdev on numa socket %u",
-               rte_socket_id());
+       struct rte_eth_dev *eth_dev;
 
        if (dev == NULL)
                return -1;
@@ -1047,26 +1114,9 @@ rte_pmd_af_packet_remove(struct rte_vdev_device *dev)
        /* find the ethdev entry */
        eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
        if (eth_dev == NULL)
-               return -1;
-
-       /* mac_addrs must not be freed alone because part of dev_private */
-       eth_dev->data->mac_addrs = NULL;
-
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-               return rte_eth_dev_release_port(eth_dev);
-
-       internals = eth_dev->data->dev_private;
-       req = &internals->req;
-       for (q = 0; q < internals->nb_queues; q++) {
-               munmap(internals->rx_queue[q].map,
-                       2 * req->tp_block_size * req->tp_block_nr);
-               rte_free(internals->rx_queue[q].rd);
-               rte_free(internals->tx_queue[q].rd);
-       }
-       free(internals->if_name);
-       rte_free(internals->rx_queue);
-       rte_free(internals->tx_queue);
+               return 0; /* port already released */
 
+       eth_dev_close(eth_dev);
        rte_eth_dev_release_port(eth_dev);
 
        return 0;