net/mlx4: add Tx bypassing Verbs
diff --git a/drivers/net/mlx4/mlx4_rxtx.c b/drivers/net/mlx4/mlx4_rxtx.c
index b5e7777..38b87a0 100644
--- a/drivers/net/mlx4/mlx4_rxtx.c
+++ b/drivers/net/mlx4/mlx4_rxtx.c
 
 #include <rte_branch_prediction.h>
 #include <rte_common.h>
+#include <rte_io.h>
 #include <rte_mbuf.h>
 #include <rte_mempool.h>
 #include <rte_prefetch.h>
 
 #include "mlx4.h"
+#include "mlx4_prm.h"
 #include "mlx4_rxtx.h"
 #include "mlx4_utils.h"
 
+/**
+ * Pointer-value pair structure used in mlx4_post_send() for saving the first
+ * 32-bit DWORD of a TXBB.
+ */
+struct pv {
+       struct mlx4_wqe_data_seg *dseg;
+       uint32_t val;
+};
+
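The pairs recorded here are flushed in reverse order after a write barrier,
just before the control segment is updated (see mlx4_post_send() below). A
minimal sketch of that deferred-write pattern, with a hypothetical helper name
and the types/barrier taken from this patch:

	/* Sketch only: mirrors the flush loop near the end of mlx4_post_send(). */
	static void
	mlx4_pv_flush(struct pv *pv, int pv_counter)
	{
		/* Make segment data visible before any byte_count is set. */
		rte_io_wmb();
		while (--pv_counter >= 0)
			pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
	}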
+/**
+ * Stamp a WQE so it won't be reused by the HW.
+ *
+ * This routine is used when freeing a WQE that has been consumed by the
+ * hardware, or when building a WQ entry has failed, leaving partial
+ * information on the queue.
+ *
+ * @param sq
+ *   Pointer to the SQ structure.
+ * @param index
+ *   Index of the freed WQE.
+ * @param owner
+ *   The value of the WQE owner bit to use in the stamp.
+ *
+ * @return
+ *   The number of Tx basic blocks (TXBB) the WQE contained.
+ */
+static int
+mlx4_txq_stamp_freed_wqe(struct mlx4_sq *sq, uint16_t index, uint8_t owner)
+{
+       uint32_t stamp = rte_cpu_to_be_32(MLX4_SQ_STAMP_VAL |
+                                         (!!owner << MLX4_SQ_STAMP_SHIFT));
+       uint8_t *wqe = mlx4_get_send_wqe(sq, (index & sq->txbb_cnt_mask));
+       uint32_t *ptr = (uint32_t *)wqe;
+       int i;
+       int txbbs_size;
+       int num_txbbs;
+
+       /* Extract the size from the control segment of the WQE. */
+       num_txbbs = MLX4_SIZE_TO_TXBBS((((struct mlx4_wqe_ctrl_seg *)
+                                        wqe)->fence_size & 0x3f) << 4);
+       txbbs_size = num_txbbs * MLX4_TXBB_SIZE;
+       /* Optimize the common case when there is no wrap-around. */
+       if (wqe + txbbs_size <= sq->eob) {
+               /* Stamp the freed descriptor. */
+               for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += MLX4_SQ_STAMP_DWORDS;
+               }
+       } else {
+               /* Stamp the freed descriptor. */
+               for (i = 0; i < txbbs_size; i += MLX4_SQ_STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += MLX4_SQ_STAMP_DWORDS;
+                       if ((uint8_t *)ptr >= sq->eob) {
+                               ptr = (uint32_t *)sq->buf;
+                               stamp ^= RTE_BE32(0x80000000);
+                       }
+               }
+       }
+       return num_txbbs;
+}
+
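For intuition, the sizing macros used above are assumed to work on 64-byte Tx
basic blocks, with control and data segments of 16 bytes each; a
self-contained worked example with local stand-in macros:

	#include <stdio.h>

	#define TXBB_SIZE 64	/* stand-in for MLX4_TXBB_SIZE */
	#define SIZE_TO_TXBBS(n) (((n) + TXBB_SIZE - 1) / TXBB_SIZE)

	int
	main(void)
	{
		/* One control segment plus three data segments, 16 B each. */
		int wqe_real_size = 16 + 3 * 16;

		/* Prints "64 bytes -> 1 TXBB(s)". */
		printf("%d bytes -> %d TXBB(s)\n",
		       wqe_real_size, SIZE_TO_TXBBS(wqe_real_size));
		return 0;
	}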
 /**
  * Manage Tx completions.
  *
@@ -80,26 +146,71 @@ mlx4_txq_complete(struct txq *txq)
        unsigned int elts_comp = txq->elts_comp;
        unsigned int elts_tail = txq->elts_tail;
        const unsigned int elts_n = txq->elts_n;
-       struct ibv_wc wcs[elts_comp];
-       int wcs_n;
+       struct mlx4_cq *cq = &txq->mcq;
+       struct mlx4_sq *sq = &txq->msq;
+       struct mlx4_cqe *cqe;
+       uint32_t cons_index = cq->cons_index;
+       uint16_t new_index;
+       uint16_t nr_txbbs = 0;
+       int pkts = 0;
 
        if (unlikely(elts_comp == 0))
                return 0;
-       wcs_n = ibv_poll_cq(txq->cq, elts_comp, wcs);
-       if (unlikely(wcs_n == 0))
+       /*
+        * Traverse the CQ entries reported by the hardware and handle
+        * each WQ entry they complete.
+        */
+       do {
+               cqe = (struct mlx4_cqe *)mlx4_get_cqe(cq, cons_index);
+               if (unlikely(!!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ^
+                   !!(cons_index & cq->cqe_cnt)))
+                       break;
+               /*
+                * Make sure we read the CQE after we read the ownership bit.
+                */
+               rte_rmb();
+               if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+                            MLX4_CQE_OPCODE_ERROR)) {
+                       struct mlx4_err_cqe *cqe_err =
+                               (struct mlx4_err_cqe *)cqe;
+                       ERROR("%p CQE error - vendor syndrome: 0x%x"
+                             " syndrome: 0x%x\n",
+                             (void *)txq, cqe_err->vendor_err,
+                             cqe_err->syndrome);
+               }
+               /* Get WQE index reported in the CQE. */
+               new_index =
+                       rte_be_to_cpu_16(cqe->wqe_index) & sq->txbb_cnt_mask;
+               do {
+                       /* Free next descriptor. */
+                       nr_txbbs +=
+                               mlx4_txq_stamp_freed_wqe(sq,
+                                    (sq->tail + nr_txbbs) & sq->txbb_cnt_mask,
+                                    !!((sq->tail + nr_txbbs) & sq->txbb_cnt));
+                       pkts++;
+               } while (((sq->tail + nr_txbbs) & sq->txbb_cnt_mask) !=
+                        new_index);
+               cons_index++;
+       } while (1);
+       if (unlikely(pkts == 0))
                return 0;
-       if (unlikely(wcs_n < 0)) {
-               DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)",
-                     (void *)txq, wcs_n);
-               return -1;
-       }
-       elts_comp -= wcs_n;
+       /*
+        * Update CQ.
+        * To prevent CQ overflow we first update CQ consumer and only then
+        * the ring consumer.
+        */
+       cq->cons_index = cons_index;
+       *cq->set_ci_db = rte_cpu_to_be_32(cq->cons_index & 0xffffff);
+       rte_wmb();
+       sq->tail = sq->tail + nr_txbbs;
+       /* Update the list of packets posted for transmission. */
+       elts_comp -= pkts;
        assert(elts_comp <= txq->elts_comp);
        /*
-        * Assume WC status is successful as nothing can be done about it
-        * anyway.
+        * Assume completion status is successful as nothing can be done about
+        * it anyway.
         */
-       elts_tail += wcs_n * txq->elts_comp_cd_init;
+       elts_tail += pkts;
        if (elts_tail >= elts_n)
                elts_tail -= elts_n;
        txq->elts_tail = elts_tail;
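The break condition at the top of the loop is an ownership test: a CQE belongs
to software when its owner bit equals the lap parity of the consumer index. A
hedged restatement as a standalone predicate (helper name hypothetical):

	/* Nonzero when the CQE is owned by software and safe to consume. */
	static inline int
	mlx4_cqe_sw_owned(const struct mlx4_cqe *cqe, uint32_t cons_index,
			  uint32_t cqe_cnt)
	{
		return !!(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK) ==
		       !!(cons_index & cqe_cnt);
	}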
@@ -182,6 +293,161 @@ mlx4_txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
        return txq->mp2mr[i].lkey;
 }
 
+/**
+ * Posts a single work request to a send queue.
+ *
+ * @param txq
+ *   Target Tx queue.
+ * @param pkt
+ *   Packet to transmit.
+ *
+ * @return
+ *   0 on success, negative errno value otherwise and rte_errno is set.
+ */
+static inline int
+mlx4_post_send(struct txq *txq, struct rte_mbuf *pkt)
+{
+       struct mlx4_wqe_ctrl_seg *ctrl;
+       struct mlx4_wqe_data_seg *dseg;
+       struct mlx4_sq *sq = &txq->msq;
+       struct rte_mbuf *buf;
+       uint32_t head_idx = sq->head & sq->txbb_cnt_mask;
+       uint32_t lkey;
+       uintptr_t addr;
+       uint32_t srcrb_flags;
+       uint32_t owner_opcode = MLX4_OPCODE_SEND;
+       uint32_t byte_count;
+       int wqe_real_size;
+       int nr_txbbs;
+       int rc;
+       struct pv *pv = (struct pv *)txq->bounce_buf;
+       int pv_counter = 0;
+
+       /* Calculate the needed work queue entry size for this packet. */
+       wqe_real_size = sizeof(struct mlx4_wqe_ctrl_seg) +
+                       pkt->nb_segs * sizeof(struct mlx4_wqe_data_seg);
+       nr_txbbs = MLX4_SIZE_TO_TXBBS(wqe_real_size);
+       /*
+        * Check that there is room for this WQE in the send queue and that
+        * the WQE size is legal.
+        */
+       if (((sq->head - sq->tail) + nr_txbbs +
+            sq->headroom_txbbs) >= sq->txbb_cnt ||
+           nr_txbbs > MLX4_MAX_WQE_TXBBS) {
+               rc = ENOSPC;
+               goto err;
+       }
+       /* Get the control and data entries of the WQE. */
+       ctrl = (struct mlx4_wqe_ctrl_seg *)mlx4_get_send_wqe(sq, head_idx);
+       dseg = (struct mlx4_wqe_data_seg *)((uintptr_t)ctrl +
+                                           sizeof(struct mlx4_wqe_ctrl_seg));
+       /* Fill the data segments with buffer information. */
+       for (buf = pkt; buf != NULL; buf = buf->next, dseg++) {
+               addr = rte_pktmbuf_mtod(buf, uintptr_t);
+               rte_prefetch0((volatile void *)addr);
+               /* Handle WQE wraparound. */
+               if (unlikely(dseg >= (struct mlx4_wqe_data_seg *)sq->eob))
+                       dseg = (struct mlx4_wqe_data_seg *)sq->buf;
+               dseg->addr = rte_cpu_to_be_64(addr);
+               /* Memory region key for this memory pool. */
+               lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
+               if (unlikely(lkey == (uint32_t)-1)) {
+                       /* MR does not exist. */
+                       DEBUG("%p: unable to get MP <-> MR association",
+                             (void *)txq);
+                       /*
+                        * Restamp entry in case of failure.
+                        * Make sure that size is written correctly.
+                        * Note that we give ownership to the SW, not the HW.
+                        */
+                       ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
+                       mlx4_txq_stamp_freed_wqe(sq, head_idx,
+                                    (sq->head & sq->txbb_cnt) ? 0 : 1);
+                       rc = EFAULT;
+                       goto err;
+               }
+               dseg->lkey = rte_cpu_to_be_32(lkey);
+               if (likely(buf->data_len)) {
+                       byte_count = rte_cpu_to_be_32(buf->data_len);
+               } else {
+                       /*
+                        * Zero length segment is treated as inline segment
+                        * with zero data.
+                        */
+                       byte_count = RTE_BE32(0x80000000);
+               }
+               /*
+                * If the data segment is not at the beginning of a
+                * Tx basic block (TXBB) then write the byte count,
+                * else postpone the writing to just before updating the
+                * control segment.
+                */
+               if ((uintptr_t)dseg & (uintptr_t)(MLX4_TXBB_SIZE - 1)) {
+                       /*
+                        * Need a barrier here before writing the byte_count
+                        * fields to make sure that all the data is visible
+                        * before the byte_count field is set.
+                        * Otherwise, if the segment begins a new cacheline,
+                        * the HCA prefetcher could grab the 64-byte chunk and
+                        * get a valid (!= 0xffffffff) byte count but stale
+                        * data, and end up sending the wrong data.
+                        */
+                       rte_io_wmb();
+                       dseg->byte_count = byte_count;
+               } else {
+                       /*
+                        * This data segment starts at the beginning of a new
+                        * TXBB, so we need to postpone its byte_count writing
+                        * for later.
+                        */
+                       pv[pv_counter].dseg = dseg;
+                       pv[pv_counter++].val = byte_count;
+               }
+       }
+       /* Write the first DWORD of each TXBB saved earlier. */
+       if (pv_counter) {
+               /* Need a barrier here before writing the byte_count. */
+               rte_io_wmb();
+               for (--pv_counter; pv_counter >= 0; pv_counter--)
+                       pv[pv_counter].dseg->byte_count = pv[pv_counter].val;
+       }
+       /* Fill the control parameters for this packet. */
+       ctrl->fence_size = (wqe_real_size >> 4) & 0x3f;
+       /*
+        * The caller should prepare "imm" in advance in order to support
+        * VF-to-VF communication (when the device is a virtual function).
+        */
+       ctrl->imm = 0;
+       /*
+        * For raw Ethernet, the SOLICIT flag is used to indicate that no ICRC
+        * should be calculated.
+        */
+       txq->elts_comp_cd -= nr_txbbs;
+       if (unlikely(txq->elts_comp_cd <= 0)) {
+               txq->elts_comp_cd = txq->elts_comp_cd_init;
+               srcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT |
+                                      MLX4_WQE_CTRL_CQ_UPDATE);
+       } else {
+               srcrb_flags = RTE_BE32(MLX4_WQE_CTRL_SOLICIT);
+       }
+       ctrl->srcrb_flags = srcrb_flags;
+       /*
+        * Make sure descriptor is fully written before
+        * setting ownership bit (because HW can start
+        * executing as soon as we do).
+        */
+       rte_wmb();
+       ctrl->owner_opcode = rte_cpu_to_be_32(owner_opcode |
+                                             ((sq->head & sq->txbb_cnt) ?
+                                              MLX4_BIT_WQE_OWN : 0));
+       sq->head += nr_txbbs;
+       return 0;
+err:
+       rte_errno = rc;
+       return -rc;
+}
+
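A hedged caller-side sketch: mlx4_tx_burst() below feeds packets through this
function one by one and rings the doorbell once per burst rather than per
packet:

	/* Simplified from mlx4_tx_burst() below; completion bookkeeping
	 * and element cleanup omitted. */
	for (i = 0; i < pkts_n; i++)
		if (mlx4_post_send(txq, pkts[i]))
			break;
	if (i > 0) {
		/* Descriptors must be written before the doorbell record. */
		rte_wmb();
		rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
	}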
 /**
  * DPDK callback for Tx.
  *
@@ -199,18 +465,15 @@ uint16_t
 mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct txq *txq = (struct txq *)dpdk_txq;
-       struct ibv_send_wr *wr_head = NULL;
-       struct ibv_send_wr **wr_next = &wr_head;
-       struct ibv_send_wr *wr_bad = NULL;
        unsigned int elts_head = txq->elts_head;
        const unsigned int elts_n = txq->elts_n;
-       unsigned int elts_comp_cd = txq->elts_comp_cd;
        unsigned int elts_comp = 0;
+       unsigned int bytes_sent = 0;
        unsigned int i;
        unsigned int max;
        int err;
 
-       assert(elts_comp_cd != 0);
+       assert(txq->elts_comp_cd != 0);
        mlx4_txq_complete(txq);
        max = (elts_n - (elts_head - txq->elts_tail));
        if (max > elts_n)
@@ -229,10 +492,6 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
                struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
                struct txq_elt *elt = &(*txq->elts)[elts_head];
-               struct ibv_send_wr *wr = &elt->wr;
-               unsigned int segs = buf->nb_segs;
-               unsigned int sent_size = 0;
-               uint32_t send_flags = 0;
 
                /* Clean up old buffer. */
                if (likely(elt->buf != NULL)) {
@@ -250,100 +509,31 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                tmp = next;
                        } while (tmp != NULL);
                }
-               /* Request Tx completion. */
-               if (unlikely(--elts_comp_cd == 0)) {
-                       elts_comp_cd = txq->elts_comp_cd_init;
-                       ++elts_comp;
-                       send_flags |= IBV_SEND_SIGNALED;
-               }
-               if (likely(segs == 1)) {
-                       struct ibv_sge *sge = &elt->sge;
-                       uintptr_t addr;
-                       uint32_t length;
-                       uint32_t lkey;
-
-                       /* Retrieve buffer information. */
-                       addr = rte_pktmbuf_mtod(buf, uintptr_t);
-                       length = buf->data_len;
-                       /* Retrieve memory region key for this memory pool. */
-                       lkey = mlx4_txq_mp2mr(txq, mlx4_txq_mb2mp(buf));
-                       if (unlikely(lkey == (uint32_t)-1)) {
-                               /* MR does not exist. */
-                               DEBUG("%p: unable to get MP <-> MR"
-                                     " association", (void *)txq);
-                               /* Clean up Tx element. */
-                               elt->buf = NULL;
-                               goto stop;
-                       }
-                       /* Update element. */
-                       elt->buf = buf;
-                       if (txq->priv->vf)
-                               rte_prefetch0((volatile void *)
-                                             (uintptr_t)addr);
-                       RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
-                       sge->addr = addr;
-                       sge->length = length;
-                       sge->lkey = lkey;
-                       sent_size += length;
-               } else {
-                       err = -1;
+               RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+               /* Post the packet for sending. */
+               err = mlx4_post_send(txq, buf);
+               if (unlikely(err)) {
+                       elt->buf = NULL;
                        goto stop;
                }
-               if (sent_size <= txq->max_inline)
-                       send_flags |= IBV_SEND_INLINE;
+               elt->buf = buf;
+               bytes_sent += buf->pkt_len;
+               ++elts_comp;
                elts_head = elts_head_next;
-               /* Increment sent bytes counter. */
-               txq->stats.obytes += sent_size;
-               /* Set up WR. */
-               wr->sg_list = &elt->sge;
-               wr->num_sge = segs;
-               wr->opcode = IBV_WR_SEND;
-               wr->send_flags = send_flags;
-               *wr_next = wr;
-               wr_next = &wr->next;
        }
 stop:
        /* Take a shortcut if nothing must be sent. */
        if (unlikely(i == 0))
                return 0;
-       /* Increment sent packets counter. */
+       /* Increment send statistics counters. */
        txq->stats.opackets += i;
+       txq->stats.obytes += bytes_sent;
+       /* Make sure that descriptors are written before doorbell record. */
+       rte_wmb();
        /* Ring QP doorbell. */
-       *wr_next = NULL;
-       assert(wr_head);
-       err = ibv_post_send(txq->qp, wr_head, &wr_bad);
-       if (unlikely(err)) {
-               uint64_t obytes = 0;
-               uint64_t opackets = 0;
-
-               /* Rewind bad WRs. */
-               while (wr_bad != NULL) {
-                       int j;
-
-                       /* Force completion request if one was lost. */
-                       if (wr_bad->send_flags & IBV_SEND_SIGNALED) {
-                               elts_comp_cd = 1;
-                               --elts_comp;
-                       }
-                       ++opackets;
-                       for (j = 0; j < wr_bad->num_sge; ++j)
-                               obytes += wr_bad->sg_list[j].length;
-                       elts_head = (elts_head ? elts_head : elts_n) - 1;
-                       wr_bad = wr_bad->next;
-               }
-               txq->stats.opackets -= opackets;
-               txq->stats.obytes -= obytes;
-               i -= opackets;
-               DEBUG("%p: ibv_post_send() failed, %" PRIu64 " packets"
-                     " (%" PRIu64 " bytes) rejected: %s",
-                     (void *)txq,
-                     opackets,
-                     obytes,
-                     (err <= -1) ? "Internal error" : strerror(err));
-       }
+       rte_write32(txq->msq.doorbell_qpn, txq->msq.db);
        txq->elts_head = elts_head;
        txq->elts_comp += elts_comp;
-       txq->elts_comp_cd = elts_comp_cd;
        return i;
 }
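Note on the doorbell above: rte_write32() is assumed to order the MMIO store
after prior I/O writes; under that assumption, an equivalent spelled-out form
would be:

	rte_io_wmb();
	rte_write32_relaxed(txq->msq.doorbell_qpn, txq->msq.db);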
 
@@ -459,7 +649,7 @@ repost:
        /* Repost WRs. */
        *wr_next = NULL;
        assert(wr_head);
-       ret = ibv_post_recv(rxq->qp, wr_head, &wr_bad);
+       ret = ibv_post_wq_recv(rxq->wq, wr_head, &wr_bad);
        if (unlikely(ret)) {
                /* Inability to repost WRs is fatal. */
                DEBUG("%p: recv_burst(): failed (ret=%d)",