diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index f76f415..f4491e7 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -139,14 +139,11 @@ static inline void wr_id_t_check(void)
        (void)wr_id_t_check;
 }
 
-/* If raw send operations are available, use them since they are faster. */
-#ifdef SEND_RAW_WR_SUPPORT
-typedef struct ibv_send_wr_raw mlx4_send_wr_t;
-#define mlx4_post_send ibv_post_send_raw
-#else
-typedef struct ibv_send_wr mlx4_send_wr_t;
-#define mlx4_post_send ibv_post_send
-#endif
+/* Transpose flags. Useful to convert IBV to DPDK flags. */
+#define TRANSPOSE(val, from, to) \
+       (((from) >= (to)) ? \
+        (((val) & (from)) / ((from) / (to))) : \
+        (((val) & (from)) * ((to) / (from))))
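
As a standalone illustration (hypothetical masks, not part of the patch):
TRANSPOSE() moves the bit selected by "from" onto the bit position of "to",
dividing when the target bit is lower and multiplying when it is higher.
All uses in this file pass single-bit masks.

	#include <assert.h>
	/* Assumes the TRANSPOSE definition above is in scope. */

	int
	main(void)
	{
		assert(TRANSPOSE(0x10, 0x10, 0x01) == 0x01); /* scale down */
		assert(TRANSPOSE(0x01, 0x01, 0x10) == 0x10); /* scale up */
		assert(TRANSPOSE(0x00, 0x10, 0x01) == 0x00); /* bit unset */
		return 0;
	}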
 
 struct mlx4_rxq_stats {
        unsigned int idx; /**< Mapping index. */
@@ -205,16 +202,17 @@ struct rxq {
                struct rxq_elt (*no_sp)[]; /* RX elements. */
        } elts;
        unsigned int sp:1; /* Use scattered RX elements. */
+       unsigned int csum:1; /* Enable checksum offloading. */
+       unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
        uint32_t mb_len; /* Length of a mp-issued mbuf. */
        struct mlx4_rxq_stats stats; /* RX queue counters. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       struct ibv_exp_res_domain *rd; /* Resource Domain. */
 };
 
 /* TX element. */
 struct txq_elt {
-       mlx4_send_wr_t wr; /* Work Request. */
-       struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
-       /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
+       struct rte_mbuf *buf;
 };
 
 /* Linear buffer type. It is used when transmitting buffers with too many
@@ -235,6 +233,8 @@ struct txq {
        } mp2mr[MLX4_PMD_TX_MP_CACHE]; /* MP to MR translation table. */
        struct ibv_cq *cq; /* Completion Queue. */
        struct ibv_qp *qp; /* Queue Pair. */
+       struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. */
+       struct ibv_exp_cq_family *if_cq; /* CQ interface. */
 #if MLX4_PMD_MAX_INLINE > 0
        uint32_t max_inline; /* Max inline send size <= MLX4_PMD_MAX_INLINE. */
 #endif
@@ -249,6 +249,7 @@ struct txq {
        linear_t (*elts_linear)[]; /* Linearized buffers. */
        struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       struct ibv_exp_res_domain *rd; /* Resource Domain. */
 };
 
 struct priv {
@@ -277,6 +278,8 @@ struct priv {
        unsigned int hw_qpg:1; /* QP groups are supported. */
        unsigned int hw_tss:1; /* TSS is supported. */
        unsigned int hw_rss:1; /* RSS is supported. */
+       unsigned int hw_csum:1; /* Checksum offload is supported. */
+       unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
        unsigned int rss:1; /* RSS is enabled. */
        unsigned int vf:1; /* This is a VF device. */
 #ifdef INLINE_RECV
@@ -797,14 +800,8 @@ txq_alloc_elts(struct txq *txq, unsigned int elts_n)
        }
        for (i = 0; (i != elts_n); ++i) {
                struct txq_elt *elt = &(*elts)[i];
-               mlx4_send_wr_t *wr = &elt->wr;
 
-               /* Configure WR. */
-               WR_ID(wr->wr_id).id = i;
-               WR_ID(wr->wr_id).offset = 0;
-               wr->sg_list = &elt->sges[0];
-               wr->opcode = IBV_WR_SEND;
-               /* Other fields are updated during TX. */
+               elt->buf = NULL;
        }
        DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
        txq->elts_n = elts_n;
@@ -863,10 +860,9 @@ txq_free_elts(struct txq *txq)
        for (i = 0; (i != elemof(*elts)); ++i) {
                struct txq_elt *elt = &(*elts)[i];
 
-               if (WR_ID(elt->wr.wr_id).offset == 0)
+               if (elt->buf == NULL)
                        continue;
-               rte_pktmbuf_free((void *)((uintptr_t)elt->sges[0].addr -
-                       WR_ID(elt->wr.wr_id).offset));
+               rte_pktmbuf_free(elt->buf);
        }
        rte_free(elts);
 }
@@ -883,14 +879,48 @@ txq_free_elts(struct txq *txq)
 static void
 txq_cleanup(struct txq *txq)
 {
+       struct ibv_exp_release_intf_params params;
        size_t i;
 
        DEBUG("cleaning up %p", (void *)txq);
        txq_free_elts(txq);
+       if (txq->if_qp != NULL) {
+               assert(txq->priv != NULL);
+               assert(txq->priv->ctx != NULL);
+               assert(txq->qp != NULL);
+               params = (struct ibv_exp_release_intf_params){
+                       .comp_mask = 0,
+               };
+               claim_zero(ibv_exp_release_intf(txq->priv->ctx,
+                                               txq->if_qp,
+                                               &params));
+       }
+       if (txq->if_cq != NULL) {
+               assert(txq->priv != NULL);
+               assert(txq->priv->ctx != NULL);
+               assert(txq->cq != NULL);
+               params = (struct ibv_exp_release_intf_params){
+                       .comp_mask = 0,
+               };
+               claim_zero(ibv_exp_release_intf(txq->priv->ctx,
+                                               txq->if_cq,
+                                               &params));
+       }
        if (txq->qp != NULL)
                claim_zero(ibv_destroy_qp(txq->qp));
        if (txq->cq != NULL)
                claim_zero(ibv_destroy_cq(txq->cq));
+       if (txq->rd != NULL) {
+               struct ibv_exp_destroy_res_domain_attr attr = {
+                       .comp_mask = 0,
+               };
+
+               assert(txq->priv != NULL);
+               assert(txq->priv->ctx != NULL);
+               claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
+                                                     txq->rd,
+                                                     &attr));
+       }
        for (i = 0; (i != elemof(txq->mp2mr)); ++i) {
                if (txq->mp2mr[i].mp == NULL)
                        break;
@@ -920,7 +950,6 @@ txq_complete(struct txq *txq)
        unsigned int elts_comp = txq->elts_comp;
        unsigned int elts_tail = txq->elts_tail;
        const unsigned int elts_n = txq->elts_n;
-       struct ibv_wc wcs[elts_comp];
        int wcs_n;
 
        if (unlikely(elts_comp == 0))
@@ -929,7 +958,7 @@ txq_complete(struct txq *txq)
        DEBUG("%p: processing %u work requests completions",
              (void *)txq, elts_comp);
 #endif
-       wcs_n = ibv_poll_cq(txq->cq, elts_comp, wcs);
+       wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
        if (unlikely(wcs_n == 0))
                return 0;
        if (unlikely(wcs_n < 0)) {
@@ -1010,6 +1039,8 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
        return txq->mp2mr[i].lkey;
 }
 
+#if MLX4_PMD_SGE_WR_N > 1
+
 /**
  * Copy scattered mbuf contents to a single linear buffer.
  *
@@ -1042,6 +1073,120 @@ linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
        return size;
 }
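
For reference, linearize_mbuf() (unchanged, its tail shown above) flattens a
segmented mbuf chain into one contiguous buffer. A conceptual sketch of its
core loop, assuming the file's NEXT()/DATA_LEN() accessors and omitting the
real function's overflow check against the linear buffer size:

	size_t off = 0;
	struct rte_mbuf *m;

	for (m = buf; m != NULL; m = NEXT(m)) {
		memcpy(&(*linear)[off], rte_pktmbuf_mtod(m, void *),
		       DATA_LEN(m));
		off += DATA_LEN(m);
	}
	/* "off" is the linearized size returned on success. */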
 
+/**
+ * Handle scattered buffers for mlx4_tx_burst().
+ *
+ * @param txq
+ *   TX queue structure.
+ * @param segs
+ *   Number of segments in buf.
+ * @param elt
+ *   TX queue element to fill.
+ * @param[in] buf
+ *   Buffer to process.
+ * @param elts_head
+ *   Index of the linear buffer to use if necessary (normally txq->elts_head).
+ * @param[out] sges
+ *   Array filled with SGEs on success.
+ *
+ * @return
+ *   A structure containing the processed packet size in bytes and the
+ *   number of SGEs. Both fields are set to (unsigned int)-1 in case of
+ *   failure.
+ */
+static struct tx_burst_sg_ret {
+       unsigned int length;
+       unsigned int num;
+}
+tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
+           struct rte_mbuf *buf, unsigned int elts_head,
+           struct ibv_sge (*sges)[MLX4_PMD_SGE_WR_N])
+{
+       unsigned int sent_size = 0;
+       unsigned int j;
+       int linearize = 0;
+
+       /* When there are too many segments, extra segments are
+        * linearized in the last SGE. */
+       if (unlikely(segs > elemof(*sges))) {
+               segs = (elemof(*sges) - 1);
+               linearize = 1;
+       }
+       /* Update element. */
+       elt->buf = buf;
+       /* Register segments as SGEs. */
+       for (j = 0; (j != segs); ++j) {
+               struct ibv_sge *sge = &(*sges)[j];
+               uint32_t lkey;
+
+               /* Retrieve Memory Region key for this memory pool. */
+               lkey = txq_mp2mr(txq, buf->pool);
+               if (unlikely(lkey == (uint32_t)-1)) {
+                       /* MR does not exist. */
+                       DEBUG("%p: unable to get MP <-> MR association",
+                             (void *)txq);
+                       /* Clean up TX element. */
+                       elt->buf = NULL;
+                       goto stop;
+               }
+               /* Update SGE. */
+               sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+               if (txq->priv->vf)
+                       rte_prefetch0((volatile void *)
+                                     (uintptr_t)sge->addr);
+               sge->length = DATA_LEN(buf);
+               sge->lkey = lkey;
+               sent_size += sge->length;
+               buf = NEXT(buf);
+       }
+       /* If buf is not NULL here and is not going to be linearized,
+        * nb_segs is not valid. */
+       assert(j == segs);
+       assert((buf == NULL) || (linearize));
+       /* Linearize extra segments. */
+       if (linearize) {
+               struct ibv_sge *sge = &(*sges)[segs];
+               linear_t *linear = &(*txq->elts_linear)[elts_head];
+               unsigned int size = linearize_mbuf(linear, buf);
+
+               assert(segs == (elemof(*sges) - 1));
+               if (size == 0) {
+                       /* Invalid packet. */
+                       DEBUG("%p: packet too large to be linearized.",
+                             (void *)txq);
+                       /* Clean up TX element. */
+                       elt->buf = NULL;
+                       goto stop;
+               }
+               /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately. */
+               if (elemof(*sges) == 1) {
+                       do {
+                               struct rte_mbuf *next = NEXT(buf);
+
+                               rte_pktmbuf_free_seg(buf);
+                               buf = next;
+                       } while (buf != NULL);
+                       elt->buf = NULL;
+               }
+               /* Update SGE. */
+               sge->addr = (uintptr_t)&(*linear)[0];
+               sge->length = size;
+               sge->lkey = txq->mr_linear->lkey;
+               sent_size += size;
+       }
+       return (struct tx_burst_sg_ret){
+               .length = sent_size,
+               .num = segs,
+       };
+stop:
+       return (struct tx_burst_sg_ret){
+               .length = -1,
+               .num = -1,
+       };
+}
+
+#endif /* MLX4_PMD_SGE_WR_N > 1 */
+
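
Returning a small struct by value lets tx_burst_sg() report the processed byte
count and the SGE count without output parameters; both fields are set to -1
together on failure, so callers only need to test one. A caller-side sketch
(it mirrors the mlx4_tx_burst() hunk further down):

	struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
	struct tx_burst_sg_ret ret;

	ret = tx_burst_sg(txq, segs, elt, buf, elts_head, &sges);
	if (ret.length == (unsigned int)-1)
		goto stop; /* tx_burst_sg() already reset elt->buf. */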
 /**
  * DPDK callback for TX.
  *
@@ -1059,9 +1204,6 @@ static uint16_t
 mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct txq *txq = (struct txq *)dpdk_txq;
-       mlx4_send_wr_t head;
-       mlx4_send_wr_t **wr_next = &head.next;
-       mlx4_send_wr_t *bad_wr;
        unsigned int elts_head = txq->elts_head;
        const unsigned int elts_tail = txq->elts_tail;
        const unsigned int elts_n = txq->elts_n;
@@ -1086,20 +1228,19 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                max = pkts_n;
        for (i = 0; (i != max); ++i) {
                struct rte_mbuf *buf = pkts[i];
+               unsigned int elts_head_next =
+                       (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
+               struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
                struct txq_elt *elt = &(*txq->elts)[elts_head];
-               mlx4_send_wr_t *wr = &elt->wr;
                unsigned int segs = NB_SEGS(buf);
-#if (MLX4_PMD_MAX_INLINE > 0) || defined(MLX4_PMD_SOFT_COUNTERS)
+#ifdef MLX4_PMD_SOFT_COUNTERS
                unsigned int sent_size = 0;
 #endif
-               unsigned int j;
-               int linearize = 0;
+               uint32_t send_flags = 0;
 
                /* Clean up old buffer. */
-               if (likely(WR_ID(wr->wr_id).offset != 0)) {
-                       struct rte_mbuf *tmp = (void *)
-                               ((uintptr_t)elt->sges[0].addr -
-                                WR_ID(wr->wr_id).offset);
+               if (likely(elt->buf != NULL)) {
+                       struct rte_mbuf *tmp = elt->buf;
 
                        /* Faster than rte_pktmbuf_free(). */
                        do {
@@ -1109,43 +1250,37 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                tmp = next;
                        } while (tmp != NULL);
                }
-#ifndef NDEBUG
-               /* For assert(). */
-               WR_ID(wr->wr_id).offset = 0;
-               for (j = 0; ((int)j < wr->num_sge); ++j) {
-                       elt->sges[j].addr = 0;
-                       elt->sges[j].length = 0;
-                       elt->sges[j].lkey = 0;
+               /* Request TX completion. */
+               if (unlikely(--elts_comp_cd == 0)) {
+                       elts_comp_cd = txq->elts_comp_cd_init;
+                       ++elts_comp;
+                       send_flags |= IBV_EXP_QP_BURST_SIGNALED;
                }
-               wr->next = NULL;
-               wr->num_sge = 0;
-#endif
-               /* Sanity checks, most of which are only relevant with
-                * debugging enabled. */
-               assert(WR_ID(wr->wr_id).id == elts_head);
-               assert(WR_ID(wr->wr_id).offset == 0);
-               assert(wr->next == NULL);
-               assert(wr->sg_list == &elt->sges[0]);
-               assert(wr->num_sge == 0);
-               assert(wr->opcode == IBV_WR_SEND);
-               /* When there are too many segments, extra segments are
-                * linearized in the last SGE. */
-               if (unlikely(segs > elemof(elt->sges))) {
-                       segs = (elemof(elt->sges) - 1);
-                       linearize = 1;
+               /* Should we enable HW CKSUM offload? */
+               if (buf->ol_flags &
+                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+                       send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
+                       /* HW does not support checksum offloads at arbitrary
+                        * offsets but automatically recognizes the packet
+                        * type. For inner L3/L4 checksums, only VXLAN (UDP)
+                        * tunnels are currently supported.
+                        *
+                        * FIXME: since PKT_TX_UDP_TUNNEL_PKT has been removed,
+                        * the outer packet type is unknown. All we know is
+                        * that the L2 header is of unusual length (not
+                        * ETHER_HDR_LEN with or without 802.1Q header). */
+                       if ((buf->l2_len != ETHER_HDR_LEN) &&
+                           (buf->l2_len != (ETHER_HDR_LEN + 4)))
+                               send_flags |= IBV_EXP_QP_BURST_TUNNEL;
                }
-               /* Set WR fields. */
-               assert((rte_pktmbuf_mtod(buf, uintptr_t) -
-                       (uintptr_t)buf) <= 0xffff);
-               WR_ID(wr->wr_id).offset =
-                       (rte_pktmbuf_mtod(buf, uintptr_t) -
-                        (uintptr_t)buf);
-               wr->num_sge = segs;
-               /* Register segments as SGEs. */
-               for (j = 0; (j != segs); ++j) {
-                       struct ibv_sge *sge = &elt->sges[j];
+               if (likely(segs == 1)) {
+                       uintptr_t addr;
+                       uint32_t length;
                        uint32_t lkey;
 
+                       /* Retrieve buffer information. */
+                       addr = rte_pktmbuf_mtod(buf, uintptr_t);
+                       length = DATA_LEN(buf);
                        /* Retrieve Memory Region key for this memory pool. */
                        lkey = txq_mp2mr(txq, buf->pool);
                        if (unlikely(lkey == (uint32_t)-1)) {
@@ -1153,110 +1288,64 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                DEBUG("%p: unable to get MP <-> MR"
                                      " association", (void *)txq);
                                /* Clean up TX element. */
-                               WR_ID(elt->wr.wr_id).offset = 0;
-#ifndef NDEBUG
-                               /* For assert(). */
-                               while (j) {
-                                       --j;
-                                       --sge;
-                                       sge->addr = 0;
-                                       sge->length = 0;
-                                       sge->lkey = 0;
-                               }
-                               wr->num_sge = 0;
-#endif
+                               elt->buf = NULL;
                                goto stop;
                        }
-                       /* Sanity checks, only relevant with debugging
-                        * enabled. */
-                       assert(sge->addr == 0);
-                       assert(sge->length == 0);
-                       assert(sge->lkey == 0);
-                       /* Update SGE. */
-                       sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+                       /* Update element. */
+                       elt->buf = buf;
                        if (txq->priv->vf)
                                rte_prefetch0((volatile void *)
-                                       (uintptr_t)sge->addr);
-                       sge->length = DATA_LEN(buf);
-                       sge->lkey = lkey;
-#if (MLX4_PMD_MAX_INLINE > 0) || defined(MLX4_PMD_SOFT_COUNTERS)
-                       sent_size += sge->length;
-#endif
-                       buf = NEXT(buf);
-               }
-               /* If buf is not NULL here and is not going to be linearized,
-                * nb_segs is not valid. */
-               assert(j == segs);
-               assert((buf == NULL) || (linearize));
-               /* Linearize extra segments. */
-               if (linearize) {
-                       struct ibv_sge *sge = &elt->sges[segs];
-                       linear_t *linear = &(*txq->elts_linear)[elts_head];
-                       unsigned int size = linearize_mbuf(linear, buf);
-
-                       assert(segs == (elemof(elt->sges) - 1));
-                       if (size == 0) {
-                               /* Invalid packet. */
-                               DEBUG("%p: packet too large to be linearized.",
-                                     (void *)txq);
-                               /* Clean up TX element. */
-                               WR_ID(elt->wr.wr_id).offset = 0;
-#ifndef NDEBUG
-                               /* For assert(). */
-                               while (j) {
-                                       --j;
-                                       --sge;
-                                       sge->addr = 0;
-                                       sge->length = 0;
-                                       sge->lkey = 0;
-                               }
-                               wr->num_sge = 0;
+                                             (uintptr_t)addr);
+                       RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+                       /* Put packet into send queue. */
+#if MLX4_PMD_MAX_INLINE > 0
+                       if (length <= txq->max_inline)
+                               err = txq->if_qp->send_pending_inline
+                                       (txq->qp,
+                                        (void *)addr,
+                                        length,
+                                        send_flags);
+                       else
 #endif
+                               err = txq->if_qp->send_pending
+                                       (txq->qp,
+                                        addr,
+                                        length,
+                                        lkey,
+                                        send_flags);
+                       if (unlikely(err))
                                goto stop;
-                       }
-                       /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately
-                        * and clear offset from WR ID. */
-                       if (elemof(elt->sges) == 1) {
-                               do {
-                                       struct rte_mbuf *next = NEXT(buf);
-
-                                       rte_pktmbuf_free_seg(buf);
-                                       buf = next;
-                               } while (buf != NULL);
-                               WR_ID(wr->wr_id).offset = 0;
-                       }
-                       /* Set WR fields and fill SGE with linear buffer. */
-                       ++wr->num_sge;
-                       /* Sanity checks, only relevant with debugging
-                        * enabled. */
-                       assert(sge->addr == 0);
-                       assert(sge->length == 0);
-                       assert(sge->lkey == 0);
-                       /* Update SGE. */
-                       sge->addr = (uintptr_t)&(*linear)[0];
-                       sge->length = size;
-                       sge->lkey = txq->mr_linear->lkey;
-#if (MLX4_PMD_MAX_INLINE > 0) || defined(MLX4_PMD_SOFT_COUNTERS)
-                       sent_size += size;
+#ifdef MLX4_PMD_SOFT_COUNTERS
+                       sent_size += length;
 #endif
-               }
-               /* Link WRs together for ibv_post_send(). */
-               *wr_next = wr;
-               wr_next = &wr->next;
-#if MLX4_PMD_MAX_INLINE > 0
-               if (sent_size <= txq->max_inline)
-                       wr->send_flags = IBV_SEND_INLINE;
-               else
+               } else {
+#if MLX4_PMD_SGE_WR_N > 1
+                       struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
+                       struct tx_burst_sg_ret ret;
+
+                       ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
+                                         &sges);
+                       if (ret.length == (unsigned int)-1)
+                               goto stop;
+                       RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+                       /* Put SG list into send queue. */
+                       err = txq->if_qp->send_pending_sg_list
+                               (txq->qp,
+                                sges,
+                                ret.num,
+                                send_flags);
+                       if (unlikely(err))
+                               goto stop;
+#ifdef MLX4_PMD_SOFT_COUNTERS
+                       sent_size += ret.length;
 #endif
-                       wr->send_flags = 0;
-               /* Request TX completion. */
-               if (unlikely(--elts_comp_cd == 0)) {
-                       elts_comp_cd = txq->elts_comp_cd_init;
-                       ++elts_comp;
-                       wr->send_flags |= IBV_SEND_SIGNALED;
+#else /* MLX4_PMD_SGE_WR_N > 1 */
+                       DEBUG("%p: TX scattered buffers support not"
+                             " compiled in", (void *)txq);
+                       goto stop;
+#endif /* MLX4_PMD_SGE_WR_N > 1 */
                }
-               if (++elts_head >= elts_n)
-                       elts_head = 0;
+               elts_head = elts_head_next;
 #ifdef MLX4_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
                txq->stats.obytes += sent_size;
@@ -1270,63 +1359,13 @@ stop:
        /* Increment sent packets counter. */
        txq->stats.opackets += i;
 #endif
-       *wr_next = NULL;
-       err = mlx4_post_send(txq->qp, head.next, &bad_wr);
+       /* Ring QP doorbell. */
+       err = txq->if_qp->send_flush(txq->qp);
        if (unlikely(err)) {
-               unsigned int unsent = 0;
-
-               /* An error occurred, fix counters. */
-               while (bad_wr != NULL) {
-                       struct txq_elt *elt =
-                               containerof(bad_wr, struct txq_elt, wr);
-                       mlx4_send_wr_t *wr = &elt->wr;
-                       mlx4_send_wr_t *next = wr->next;
-#if defined(MLX4_PMD_SOFT_COUNTERS) || !defined(NDEBUG)
-                       unsigned int j;
-#endif
-
-                       assert(wr == bad_wr);
-                       /* Clean up TX element without freeing it, caller
-                        * should take care of this. */
-                       WR_ID(elt->wr.wr_id).offset = 0;
-#ifdef MLX4_PMD_SOFT_COUNTERS
-                       for (j = 0; ((int)j < wr->num_sge); ++j)
-                               txq->stats.obytes -= wr->sg_list[j].length;
-#endif
-                       ++unsent;
-                       if (wr->send_flags & IBV_SEND_SIGNALED) {
-                               assert(elts_comp != 0);
-                               --elts_comp;
-                       }
-                       if (elts_comp_cd == txq->elts_comp_cd_init)
-                               elts_comp_cd = 1;
-                       else
-                               ++elts_comp_cd;
-#ifndef NDEBUG
-                       /* For assert(). */
-                       for (j = 0; ((int)j < wr->num_sge); ++j) {
-                               elt->sges[j].addr = 0;
-                               elt->sges[j].length = 0;
-                               elt->sges[j].lkey = 0;
-                       }
-                       wr->next = NULL;
-                       wr->num_sge = 0;
-#endif
-                       bad_wr = next;
-               }
-#ifdef MLX4_PMD_SOFT_COUNTERS
-               txq->stats.opackets -= unsent;
-#endif
-               assert(i >= unsent);
-               i -= unsent;
-               /* "Unsend" remaining packets. */
-               elts_head -= unsent;
-               if (elts_head >= elts_n)
-                       elts_head += elts_n;
-               assert(elts_head < elts_n);
-               DEBUG("%p: mlx4_post_send() failed, %u unprocessed WRs: %s",
-                     (void *)txq, unsent,
-                     ((err <= -1) ? "Internal error" : strerror(err)));
+               /* A nonzero value is not supposed to be returned.
+                * Nothing can be done about it. */
+               DEBUG("%p: send_flush() failed with error %d",
+                     (void *)txq, err);
        }
        txq->elts_head = elts_head;
        txq->elts_comp += elts_comp;
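
The burst interface replaces the linked-WR model entirely: send_pending*()
only stages descriptors, and nothing reaches the NIC until send_flush() rings
the doorbell once for the whole burst. A simplified sketch of the pattern used
above, where addr, length and lkey stand for one staged packet:

	uint32_t send_flags = 0;

	/* Request a completion once every elts_comp_cd_init packets. */
	if (unlikely(--elts_comp_cd == 0)) {
		elts_comp_cd = txq->elts_comp_cd_init;
		send_flags |= IBV_EXP_QP_BURST_SIGNALED;
	}
	/* Stage one packet; nothing is sent yet. */
	if (txq->if_qp->send_pending(txq->qp, addr, length, lkey, send_flags))
		goto stop;
	/* After the loop: one doorbell covers every staged packet. */
	txq->if_qp->send_flush(txq->qp);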
@@ -1361,9 +1400,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                .socket = socket
        };
        union {
-               struct ibv_qp_init_attr init;
+               struct ibv_exp_query_intf_params params;
+               struct ibv_exp_qp_init_attr init;
+               struct ibv_exp_res_domain_init_attr rd;
+               struct ibv_exp_cq_init_attr cq;
                struct ibv_exp_qp_attr mod;
        } attr;
+       enum ibv_exp_query_intf_status status;
        int ret = 0;
 
        (void)conf; /* Thresholds configuration (ignored). */
@@ -1374,7 +1417,24 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
        }
        desc /= MLX4_PMD_SGE_WR_N;
        /* MRs will be registered in mp2mr[] later. */
-       tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+       attr.rd = (struct ibv_exp_res_domain_init_attr){
+               .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+                             IBV_EXP_RES_DOMAIN_MSG_MODEL),
+               .thread_model = IBV_EXP_THREAD_SINGLE,
+               .msg_model = IBV_EXP_MSG_HIGH_BW,
+       };
+       tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+       if (tmpl.rd == NULL) {
+               ret = ENOMEM;
+               ERROR("%p: RD creation failure: %s",
+                     (void *)dev, strerror(ret));
+               goto error;
+       }
+       attr.cq = (struct ibv_exp_cq_init_attr){
+               .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+               .res_domain = tmpl.rd,
+       };
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
@@ -1385,7 +1445,7 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
              priv->device_attr.max_qp_wr);
        DEBUG("priv->device_attr.max_sge is %d",
              priv->device_attr.max_sge);
-       attr.init = (struct ibv_qp_init_attr){
+       attr.init = (struct ibv_exp_qp_init_attr){
                /* CQ to be associated with the send queue. */
                .send_cq = tmpl.cq,
                /* CQ to be associated with the receive queue. */
@@ -1407,9 +1467,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                .qp_type = IBV_QPT_RAW_PACKET,
                /* Do *NOT* enable this, completion events are managed per
                 * TX burst. */
-               .sq_sig_all = 0
+               .sq_sig_all = 0,
+               .pd = priv->pd,
+               .res_domain = tmpl.rd,
+               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
        };
-       tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
+       tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
@@ -1455,6 +1519,35 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                      (void *)dev, strerror(ret));
                goto error;
        }
+       attr.params = (struct ibv_exp_query_intf_params){
+               .intf_scope = IBV_EXP_INTF_GLOBAL,
+               .intf = IBV_EXP_INTF_CQ,
+               .obj = tmpl.cq,
+       };
+       tmpl.if_cq = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+       if (tmpl.if_cq == NULL) {
+               ERROR("%p: CQ interface family query failed with status %d",
+                     (void *)dev, status);
+               goto error;
+       }
+       attr.params = (struct ibv_exp_query_intf_params){
+               .intf_scope = IBV_EXP_INTF_GLOBAL,
+               .intf = IBV_EXP_INTF_QP_BURST,
+               .obj = tmpl.qp,
+#ifdef HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK
+               /* MC loopback must be disabled when not using a VF. */
+               .family_flags =
+                       (!priv->vf ?
+                        IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK :
+                        0),
+#endif
+       };
+       tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
+       if (tmpl.if_qp == NULL) {
+               ERROR("%p: QP interface family query failed with status %d",
+                     (void *)dev, status);
+               goto error;
+       }
        /* Clean up txq in case we're reinitializing it. */
        DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
        txq_cleanup(txq);
@@ -2376,11 +2469,71 @@ rxq_cleanup(struct rxq *rxq)
        }
        if (rxq->cq != NULL)
                claim_zero(ibv_destroy_cq(rxq->cq));
+       if (rxq->rd != NULL) {
+               struct ibv_exp_destroy_res_domain_attr attr = {
+                       .comp_mask = 0,
+               };
+
+               assert(rxq->priv != NULL);
+               assert(rxq->priv->ctx != NULL);
+               claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
+                                                     rxq->rd,
+                                                     &attr));
+       }
        if (rxq->mr != NULL)
                claim_zero(ibv_dereg_mr(rxq->mr));
        memset(rxq, 0, sizeof(*rxq));
 }
 
+/**
+ * Translate RX completion flags to offload flags.
+ *
+ * @param[in] rxq
+ *   Pointer to RX queue structure.
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Offload flags (ol_flags) for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
+{
+       uint32_t ol_flags;
+
+       ol_flags =
+               TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV4_PACKET, PKT_RX_IPV4_HDR) |
+               TRANSPOSE(flags, IBV_EXP_CQ_RX_IPV6_PACKET, PKT_RX_IPV6_HDR);
+       if (rxq->csum)
+               ol_flags |=
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_IP_CSUM_OK,
+                                 PKT_RX_IP_CKSUM_BAD) |
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
+                                 PKT_RX_L4_CKSUM_BAD);
+       /*
+        * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
+        * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
+        * (its value is 0).
+        */
+       if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+               ol_flags |=
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET,
+                                 PKT_RX_TUNNEL_IPV4_HDR) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET,
+                                 PKT_RX_TUNNEL_IPV6_HDR) |
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+                                 PKT_RX_IP_CKSUM_BAD) |
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+                                 PKT_RX_L4_CKSUM_BAD);
+       return ol_flags;
+}
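
Note the negated flags above: TRANSPOSE() maps a hardware *_CSUM_OK completion
bit onto a *_CKSUM_BAD mbuf flag, so passing ~flags sets the BAD flag exactly
when hardware did not report the checksum as valid. A standalone illustration
(single-bit masks, as with all flags used here):

	/* OK bit present -> BAD flag clear. */
	assert(TRANSPOSE(~IBV_EXP_CQ_RX_IP_CSUM_OK,
			 IBV_EXP_CQ_RX_IP_CSUM_OK,
			 PKT_RX_IP_CKSUM_BAD) == 0);
	/* OK bit absent (flags == 0) -> BAD flag set. */
	assert(TRANSPOSE(~0, IBV_EXP_CQ_RX_IP_CSUM_OK,
			 PKT_RX_IP_CKSUM_BAD) == PKT_RX_IP_CKSUM_BAD);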
+
 static uint16_t
 mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
 
@@ -2425,6 +2578,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                struct rte_mbuf **pkt_buf_next = &pkt_buf;
                unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
                unsigned int j = 0;
+               uint32_t flags;
 
                /* Sanity checks. */
 #ifdef NDEBUG
@@ -2435,7 +2589,8 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                assert(wr->num_sge == elemof(elt->sges));
                assert(elts_head < rxq->elts_n);
                assert(rxq->elts_head < rxq->elts_n);
-               ret = rxq->if_cq->poll_length(rxq->cq, NULL, NULL);
+               ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
+                                                   &flags);
                if (unlikely(ret < 0)) {
                        struct ibv_wc wc;
                        int wcs_n;
@@ -2561,7 +2716,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                NB_SEGS(pkt_buf) = j;
                PORT(pkt_buf) = rxq->port_id;
                PKT_LEN(pkt_buf) = pkt_buf_len;
-               pkt_buf->ol_flags = 0;
+               pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
                /* Return packet. */
                *(pkts++) = pkt_buf;
@@ -2638,6 +2793,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr -
                        WR_ID(wr_id).offset);
                struct rte_mbuf *rep;
+               uint32_t flags;
 
                /* Sanity checks. */
                assert(WR_ID(wr_id).id < rxq->elts_n);
@@ -2645,7 +2801,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                assert(wr->num_sge == 1);
                assert(elts_head < rxq->elts_n);
                assert(rxq->elts_head < rxq->elts_n);
-               ret = rxq->if_cq->poll_length(rxq->cq, NULL, NULL);
+               ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
+                                                   &flags);
                if (unlikely(ret < 0)) {
                        struct ibv_wc wc;
                        int wcs_n;
@@ -2719,7 +2876,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                NEXT(seg) = NULL;
                PKT_LEN(seg) = len;
                DATA_LEN(seg) = len;
-               seg->ol_flags = 0;
+               seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
                /* Return packet. */
                *(pkts++) = seg;
@@ -2770,7 +2927,8 @@ repost:
  *   QP pointer or NULL in case of error.
  */
 static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
+rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
+            struct ibv_exp_res_domain *rd)
 {
        struct ibv_exp_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
@@ -2789,8 +2947,10 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
                                         MLX4_PMD_SGE_WR_N),
                },
                .qp_type = IBV_QPT_RAW_PACKET,
-               .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
                .pd = priv->pd,
+               .res_domain = rd,
        };
 
 #ifdef INLINE_RECV
@@ -2820,7 +2980,7 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
  */
 static struct ibv_qp *
 rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
-                int parent)
+                int parent, struct ibv_exp_res_domain *rd)
 {
        struct ibv_exp_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
@@ -2840,8 +3000,10 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
                },
                .qp_type = IBV_QPT_RAW_PACKET,
                .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
                              IBV_EXP_QP_INIT_ATTR_QPG),
-               .pd = priv->pd
+               .pd = priv->pd,
+               .res_domain = rd,
        };
 
 #ifdef INLINE_RECV
@@ -2902,6 +3064,15 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
        /* Number of descriptors and mbufs currently allocated. */
        desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
        mbuf_n = desc_n;
+       /* Toggle RX checksum offload if hardware supports it. */
+       if (priv->hw_csum) {
+               tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+               rxq->csum = tmpl.csum;
+       }
+       if (priv->hw_csum_l2tun) {
+               tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+               rxq->csum_l2tun = tmpl.csum_l2tun;
+       }
        /* Enable scattered packets support for this queue if necessary. */
        if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
            (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -3088,6 +3259,8 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
        struct ibv_exp_qp_attr mod;
        union {
                struct ibv_exp_query_intf_params params;
+               struct ibv_exp_cq_init_attr cq;
+               struct ibv_exp_res_domain_init_attr rd;
        } attr;
        enum ibv_exp_query_intf_status status;
        struct ibv_recv_wr *bad_wr;
@@ -3123,6 +3296,11 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
                rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
        assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
        rte_pktmbuf_free(buf);
+       /* Toggle RX checksum offload if hardware supports it. */
+       if (priv->hw_csum)
+               tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+       if (priv->hw_csum_l2tun)
+               tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
        /* Enable scattered packets support for this queue if necessary. */
        if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
            (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -3145,7 +3323,24 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
                goto error;
        }
 skip_mr:
-       tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+       attr.rd = (struct ibv_exp_res_domain_init_attr){
+               .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+                             IBV_EXP_RES_DOMAIN_MSG_MODEL),
+               .thread_model = IBV_EXP_THREAD_SINGLE,
+               .msg_model = IBV_EXP_MSG_HIGH_BW,
+       };
+       tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+       if (tmpl.rd == NULL) {
+               ret = ENOMEM;
+               ERROR("%p: RD creation failure: %s",
+                     (void *)dev, strerror(ret));
+               goto error;
+       }
+       attr.cq = (struct ibv_exp_cq_init_attr){
+               .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+               .res_domain = tmpl.rd,
+       };
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
@@ -3158,10 +3353,11 @@ skip_mr:
              priv->device_attr.max_sge);
 #ifdef RSS_SUPPORT
        if (priv->rss)
-               tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent);
+               tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
+                                          tmpl.rd);
        else
 #endif /* RSS_SUPPORT */
-               tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
+               tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
@@ -3620,6 +3816,18 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        info->max_rx_queues = max;
        info->max_tx_queues = max;
        info->max_mac_addrs = elemof(priv->mac);
+       info->rx_offload_capa =
+               (priv->hw_csum ?
+                (DEV_RX_OFFLOAD_IPV4_CKSUM |
+                 DEV_RX_OFFLOAD_UDP_CKSUM |
+                 DEV_RX_OFFLOAD_TCP_CKSUM) :
+                0);
+       info->tx_offload_capa =
+               (priv->hw_csum ?
+                (DEV_TX_OFFLOAD_IPV4_CKSUM |
+                 DEV_TX_OFFLOAD_UDP_CKSUM |
+                 DEV_TX_OFFLOAD_TCP_CKSUM) :
+                0);
        priv_unlock(priv);
 }
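
With these capability fields populated, applications can discover checksum
offload support through the standard query path before enabling it; a sketch
of the application side, where port_id and conf are assumed to be defined by
the caller:

	struct rte_eth_dev_info info;

	rte_eth_dev_info_get(port_id, &info);
	if (info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
		conf.rxmode.hw_ip_checksum = 1; /* RX checksum validation */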
 
@@ -4338,6 +4546,8 @@ static const struct eth_dev_ops mlx4_dev_ops = {
        .mac_addr_remove = mlx4_mac_addr_remove,
        .mac_addr_add = mlx4_mac_addr_add,
        .mtu_set = mlx4_dev_set_mtu,
+       .udp_tunnel_add = NULL,
+       .udp_tunnel_del = NULL,
        .fdir_add_signature_filter = NULL,
        .fdir_update_signature_filter = NULL,
        .fdir_remove_signature_filter = NULL,
@@ -4660,6 +4870,19 @@ mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                              exp_device_attr.max_rss_tbl_sz);
 #endif /* RSS_SUPPORT */
 
+               priv->hw_csum =
+                       ((exp_device_attr.exp_device_cap_flags &
+                         IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
+                        (exp_device_attr.exp_device_cap_flags &
+                         IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
+               DEBUG("checksum offloading is %ssupported",
+                     (priv->hw_csum ? "" : "not "));
+
+               priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
+                                        IBV_EXP_DEVICE_VXLAN_SUPPORT);
+               DEBUG("L2 tunnel checksum offloads are %ssupported",
+                     (priv->hw_csum_l2tun ? "" : "not "));
+
 #ifdef INLINE_RECV
                priv->inl_recv_size = mlx4_getenv_int("MLX4_INLINE_RECV_SIZE");