net/enic: reduce Tx completion updates
authorHyong Youb Kim <hyonkim@cisco.com>
Fri, 29 Jun 2018 09:29:39 +0000 (02:29 -0700)
committerFerruh Yigit <ferruh.yigit@intel.com>
Mon, 2 Jul 2018 23:54:22 +0000 (01:54 +0200)
Request one completion update per roughly 32 buffers. This saves DMA
resources on the NIC and reduces PCIe utilization and cache miss rates.

Signed-off-by: Hyong Youb Kim <hyonkim@cisco.com>
Reviewed-by: John Daley <johndale@cisco.com>
drivers/net/enic/base/vnic_wq.c
drivers/net/enic/base/vnic_wq.h
drivers/net/enic/enic_res.h
drivers/net/enic/enic_rxtx.c

index a4c08a7..c9bf357 100644 (file)
@@ -113,6 +113,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
        vnic_wq_init_start(wq, cq_index, 0, 0,
                error_interrupt_enable,
                error_interrupt_offset);
+       wq->cq_pend = 0;
        wq->last_completed_index = 0;
 }
 
index 6622a8a..236cf69 100644 (file)
@@ -44,6 +44,7 @@ struct vnic_wq {
        struct vnic_dev_ring ring;
        struct rte_mbuf **bufs;
        unsigned int head_idx;
+       unsigned int cq_pend;
        unsigned int tail_idx;
        unsigned int socket_id;
        const struct rte_memzone *cqmsg_rz;
index 6a3a0c5..6b1f6ac 100644 (file)
@@ -20,6 +20,9 @@
 #define ENIC_ALIGN_DESCS               32
 #define ENIC_ALIGN_DESCS_MASK          ~(ENIC_ALIGN_DESCS - 1)
 
+/* Request a completion index every 32 buffers (roughly packets) */
+#define ENIC_WQ_CQ_THRESH              32
+
 #define ENIC_MIN_MTU                   68
 
 /* Does not include (possible) inserted VLAN tag and FCS */
index 89a1e66..7cddb53 100644 (file)
@@ -603,7 +603,7 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        struct wq_enet_desc *descs, *desc_p, desc_tmp;
        uint16_t mss;
        uint8_t vlan_tag_insert;
-       uint8_t eop;
+       uint8_t eop, cq;
        uint64_t bus_addr;
        uint8_t offload_mode;
        uint16_t header_len;
@@ -686,10 +686,14 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                                break;
                        }
                }
-
-
+               wq->cq_pend++;
+               cq = 0;
+               if (eop && wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+                       cq = 1;
+                       wq->cq_pend = 0;
+               }
                wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, header_len,
-                                offload_mode, eop, eop, 0, vlan_tag_insert,
+                                offload_mode, eop, cq, 0, vlan_tag_insert,
                                 vlan_id, 0);
 
                *desc_p = desc_tmp;
@@ -702,14 +706,21 @@ uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                            tx_pkt->next) {
                                data_len = tx_pkt->data_len;
 
-                               if (tx_pkt->next == NULL)
+                               wq->cq_pend++;
+                               cq = 0;
+                               if (tx_pkt->next == NULL) {
                                        eop = 1;
+                                       if (wq->cq_pend >= ENIC_WQ_CQ_THRESH) {
+                                               cq = 1;
+                                               wq->cq_pend = 0;
+                                       }
+                               }
                                desc_p = descs + head_idx;
                                bus_addr = (dma_addr_t)(tx_pkt->buf_iova
                                           + tx_pkt->data_off);
                                wq_enet_desc_enc((struct wq_enet_desc *)
                                                 &desc_tmp, bus_addr, data_len,
-                                                mss, 0, offload_mode, eop, eop,
+                                                mss, 0, offload_mode, eop, cq,
                                                 0, vlan_tag_insert, vlan_id,
                                                 0);