net/enic: support mbuf fast free offload
author    Hyong Youb Kim <hyonkim@cisco.com>
          Fri, 29 Jun 2018 09:29:38 +0000 (02:29 -0700)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Mon, 2 Jul 2018 23:54:20 +0000 (01:54 +0200)
Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
doc/guides/nics/features/enic.ini
drivers/net/enic/base/vnic_wq.h
drivers/net/enic/enic.h
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_res.c
drivers/net/enic/enic_rxtx.c

diff --git a/doc/guides/nics/features/enic.ini b/doc/guides/nics/features/enic.ini
index ae46d29..31a3fb5 100644
@@ -6,6 +6,7 @@
 [Features]
 Link status          = Y
 Link status event    = Y
+Fast mbuf free       = Y
 Rx interrupt         = Y
 Queue start/stop     = Y
 MTU update           = Y
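
"Fast mbuf free" in the feature matrix corresponds to the DEV_TX_OFFLOAD_MBUF_FAST_FREE Tx offload: by requesting it, the application guarantees that, per Tx queue, every transmitted mbuf comes from the same mempool and has a reference count of 1, which lets the PMD return completed mbufs to the pool in bulk without per-mbuf checks. A minimal sketch of the application side follows; port_id and the queue counts are illustrative and assumed to be set up elsewhere, and error handling is trimmed.

#include <rte_ethdev.h>

/* Sketch: request mbuf fast free port-wide, if the PMD advertises it.
 * An offload set in txmode.offloads applies to every Tx queue; the
 * enicpmd_dev_tx_queue_setup() change below ORs it into wq->offloads.
 */
static int
enable_fast_free(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf port_conf = { 0 };

	rte_eth_dev_info_get(port_id, &dev_info);
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}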
diff --git a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h
index 86ac10e..6622a8a 100644
@@ -48,6 +48,7 @@ struct vnic_wq {
        unsigned int socket_id;
        const struct rte_memzone *cqmsg_rz;
        uint16_t last_completed_index;
+       uint64_t offloads;
 };
 
 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index b611f0a..af790fc 100644
@@ -183,6 +183,7 @@ struct enic {
 
        uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
        uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+       uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
        uint64_t tx_offload_mask; /* PKT_TX flags accepted */
 };
 
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 117b362..ef18f88 100644
@@ -185,17 +185,21 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
-       __rte_unused const struct rte_eth_txconf *tx_conf)
+       const struct rte_eth_txconf *tx_conf)
 {
        int ret;
        struct enic *enic = pmd_priv(eth_dev);
+       struct vnic_wq *wq;
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return -E_RTE_SECONDARY;
 
        ENICPMD_FUNC_TRACE();
        RTE_ASSERT(queue_idx < enic->conf_wq_count);
-       eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+       wq = &enic->wq[queue_idx];
+       wq->offloads = tx_conf->offloads |
+               eth_dev->data->dev_conf.txmode.offloads;
+       eth_dev->data->tx_queues[queue_idx] = (void *)wq;
 
        ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
        if (ret) {
@@ -477,6 +481,7 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
        device_info->rx_offload_capa = enic->rx_offload_capa;
        device_info->tx_offload_capa = enic->tx_offload_capa;
+       device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
        device_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
        };
@@ -765,7 +770,7 @@ static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
        ENICPMD_FUNC_TRACE();
        qinfo->nb_desc = wq->ring.desc_count;
        memset(&qinfo->conf, 0, sizeof(qinfo->conf));
-       qinfo->conf.offloads = enic->tx_offload_capa;
+       qinfo->conf.offloads = wq->offloads;
        /* tx_thresh, and all the other fields are not applicable for enic */
 }
 
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index d1113b2..11d66a6 100644
@@ -183,7 +183,9 @@ int enic_get_vnic_config(struct enic *enic)
         * Default hardware capabilities. enic_dev_init() may add additional
         * flags if it enables overlay offloads.
         */
+       enic->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
        enic->tx_offload_capa =
+               enic->tx_queue_offload_capa |
                DEV_TX_OFFLOAD_MULTI_SEGS |
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_IPV4_CKSUM |
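
enic_get_vnic_config() advertises DEV_TX_OFFLOAD_MBUF_FAST_FREE both port-wide (tx_offload_capa) and as a per-queue capability (tx_queue_offload_capa), so an application may also enable it for selected queues only through rte_eth_txconf. The sketch below, with illustrative names and no relation to enic internals, enables the offload on one queue and reads back the per-queue offloads that enicpmd_dev_txq_info_get() now reports from wq->offloads; nb_desc and socket_id are assumed to come from elsewhere.

#include <errno.h>
#include <rte_ethdev.h>

/* Sketch: enable fast free on a single Tx queue, then confirm it via
 * the per-queue info. Assumes the port is configured but not started.
 */
static int
setup_fast_free_txq(uint16_t port_id, uint16_t queue_id,
		    uint16_t nb_desc, unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txq_conf;
	struct rte_eth_txq_info qinfo;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);
	/* Per-queue requests must be within tx_queue_offload_capa. */
	if (!(dev_info.tx_queue_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		return -ENOTSUP;

	txq_conf = dev_info.default_txconf;
	txq_conf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
	ret = rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
				     socket_id, &txq_conf);
	if (ret != 0)
		return ret;

	/* qinfo.conf.offloads reflects what the PMD stored for the queue. */
	rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo);
	return (qinfo.conf.offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) ? 0 : -EIO;
}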
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 549288c..89a1e66 100644
@@ -471,6 +471,31 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
        return nb_rx;
 }
 
+static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+{
+       unsigned int desc_count, n, nb_to_free, tail_idx;
+       struct rte_mempool *pool;
+       struct rte_mbuf **m;
+
+       desc_count = wq->ring.desc_count;
+       nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+                                  + 1;
+       tail_idx = wq->tail_idx;
+       wq->tail_idx += nb_to_free;
+       wq->ring.desc_avail += nb_to_free;
+       if (wq->tail_idx >= desc_count)
+               wq->tail_idx -= desc_count;
+       /* First, free at most until the end of ring */
+       m = &wq->bufs[tail_idx];
+       pool = (*m)->pool;
+       n = RTE_MIN(nb_to_free, desc_count - tail_idx);
+       rte_mempool_put_bulk(pool, (void **)m, n);
+       n = nb_to_free - n;
+       /* Then wrap and free the rest */
+       if (unlikely(n))
+               rte_mempool_put_bulk(pool, (void **)wq->bufs, n);
+}
+
 static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 {
        struct rte_mbuf *buf;
@@ -518,7 +543,10 @@ unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
        completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
 
        if (wq->last_completed_index != completed_index) {
-               enic_free_wq_bufs(wq, completed_index);
+               if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+                       enic_fast_free_wq_bufs(wq, completed_index);
+               else
+                       enic_free_wq_bufs(wq, completed_index);
                wq->last_completed_index = completed_index;
        }
        return 0;
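
With the offload enabled, enic_cleanup_wq() dispatches to the fast path above: enic_fast_free_wq_bufs() advances the tail, then returns every completed mbuf to a single mempool using at most two rte_mempool_put_bulk() calls, one up to the end of the ring and one after the wrap. The regular enic_free_wq_bufs() path (only partially visible in this diff) cannot assume one mempool or a reference count of 1, so it has to validate each mbuf individually, typically via rte_pktmbuf_prefree_seg(). The standalone sketch below restates the wrap-around bulk-free pattern; the function and parameter names are illustrative, not enic code.

#include <rte_mbuf.h>
#include <rte_mempool.h>

/* Illustrative only: return 'n' completed mbufs, held in the circular
 * array 'bufs' of 'count' entries starting at index 'tail', to their
 * mempool. Relies on the fast-free contract (one mempool per queue,
 * refcnt == 1), so no per-mbuf rte_pktmbuf_prefree_seg() is needed.
 */
static void
bulk_free_completed(struct rte_mbuf **bufs, unsigned int count,
		    unsigned int tail, unsigned int n)
{
	struct rte_mempool *pool = bufs[tail]->pool;
	unsigned int first = RTE_MIN(n, count - tail);

	/* Chunk 1: from 'tail' up to the end of the ring. */
	rte_mempool_put_bulk(pool, (void **)&bufs[tail], first);
	/* Chunk 2: wrap to the start of the ring for any remainder. */
	if (n > first)
		rte_mempool_put_bulk(pool, (void **)bufs, n - first);
}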