mbuf: remove packet type from offload flags
diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c
index 3dff64d..6c6342f 100644
--- a/drivers/net/mlx4/mlx4.c
+++ b/drivers/net/mlx4/mlx4.c
@@ -139,6 +139,12 @@ static inline void wr_id_t_check(void)
        (void)wr_id_t_check;
 }
 
+/* Transpose flags. Useful to convert IBV to DPDK flags. */
+#define TRANSPOSE(val, from, to) \
+       (((from) >= (to)) ? \
+        (((val) & (from)) / ((from) / (to))) : \
+        (((val) & (from)) * ((to) / (from))))
+
 struct mlx4_rxq_stats {
        unsigned int idx; /**< Mapping index. */
 #ifdef MLX4_PMD_SOFT_COUNTERS
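
As a rough illustration of what the TRANSPOSE() macro above computes, consider two hypothetical single-bit masks (the names below are made up and not part of this patch). Since the source bit outranks the destination bit, the macro divides, so the destination bit ends up set exactly when the source bit was set and every other bit is discarded:

/* Illustration only: hypothetical single-bit masks. */
#define FAKE_IBV_FLAG  (1u << 5)
#define FAKE_DPDK_FLAG (1u << 1)

static uint32_t
transpose_example(uint32_t cq_flags)
{
        /* FAKE_IBV_FLAG >= FAKE_DPDK_FLAG, so TRANSPOSE() divides:
         * (cq_flags & (1 << 5)) / ((1 << 5) / (1 << 1)) yields (1 << 1)
         * when bit 5 is set in cq_flags and 0 otherwise. */
        return TRANSPOSE(cq_flags, FAKE_IBV_FLAG, FAKE_DPDK_FLAG);
}
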
@@ -196,16 +202,17 @@ struct rxq {
                struct rxq_elt (*no_sp)[]; /* RX elements. */
        } elts;
        unsigned int sp:1; /* Use scattered RX elements. */
+       unsigned int csum:1; /* Enable checksum offloading. */
+       unsigned int csum_l2tun:1; /* Same for L2 tunnels. */
        uint32_t mb_len; /* Length of a mp-issued mbuf. */
        struct mlx4_rxq_stats stats; /* RX queue counters. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       struct ibv_exp_res_domain *rd; /* Resource Domain. */
 };
 
 /* TX element. */
 struct txq_elt {
-       struct ibv_send_wr wr; /* Work Request. */
-       struct ibv_sge sges[MLX4_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */
-       /* mbuf pointer is derived from WR_ID(wr.wr_id).offset. */
+       struct rte_mbuf *buf; /**< Buffer. */
 };
 
 /* Linear buffer type. It is used when transmitting buffers with too many
@@ -242,6 +249,7 @@ struct txq {
        linear_t (*elts_linear)[]; /* Linearized buffers. */
        struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */
        unsigned int socket; /* CPU socket ID for allocations. */
+       struct ibv_exp_res_domain *rd; /* Resource Domain. */
 };
 
 struct priv {
@@ -270,6 +278,8 @@ struct priv {
        unsigned int hw_qpg:1; /* QP groups are supported. */
        unsigned int hw_tss:1; /* TSS is supported. */
        unsigned int hw_rss:1; /* RSS is supported. */
+       unsigned int hw_csum:1; /* Checksum offload is supported. */
+       unsigned int hw_csum_l2tun:1; /* Same for L2 tunnels. */
        unsigned int rss:1; /* RSS is enabled. */
        unsigned int vf:1; /* This is a VF device. */
 #ifdef INLINE_RECV
@@ -790,14 +800,8 @@ txq_alloc_elts(struct txq *txq, unsigned int elts_n)
        }
        for (i = 0; (i != elts_n); ++i) {
                struct txq_elt *elt = &(*elts)[i];
-               struct ibv_send_wr *wr = &elt->wr;
 
-               /* Configure WR. */
-               WR_ID(wr->wr_id).id = i;
-               WR_ID(wr->wr_id).offset = 0;
-               wr->sg_list = &elt->sges[0];
-               wr->opcode = IBV_WR_SEND;
-               /* Other fields are updated during TX. */
+               elt->buf = NULL;
        }
        DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n);
        txq->elts_n = elts_n;
@@ -856,10 +860,9 @@ txq_free_elts(struct txq *txq)
        for (i = 0; (i != elemof(*elts)); ++i) {
                struct txq_elt *elt = &(*elts)[i];
 
-               if (WR_ID(elt->wr.wr_id).offset == 0)
+               if (elt->buf == NULL)
                        continue;
-               rte_pktmbuf_free((void *)((uintptr_t)elt->sges[0].addr -
-                       WR_ID(elt->wr.wr_id).offset));
+               rte_pktmbuf_free(elt->buf);
        }
        rte_free(elts);
 }
@@ -907,6 +910,17 @@ txq_cleanup(struct txq *txq)
                claim_zero(ibv_destroy_qp(txq->qp));
        if (txq->cq != NULL)
                claim_zero(ibv_destroy_cq(txq->cq));
+       if (txq->rd != NULL) {
+               struct ibv_exp_destroy_res_domain_attr attr = {
+                       .comp_mask = 0,
+               };
+
+               assert(txq->priv != NULL);
+               assert(txq->priv->ctx != NULL);
+               claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx,
+                                                     txq->rd,
+                                                     &attr));
+       }
        for (i = 0; (i != elemof(txq->mp2mr)); ++i) {
                if (txq->mp2mr[i].mp == NULL)
                        break;
@@ -1025,6 +1039,8 @@ txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
        return txq->mp2mr[i].lkey;
 }
 
+#if MLX4_PMD_SGE_WR_N > 1
+
 /**
  * Copy scattered mbuf contents to a single linear buffer.
  *
@@ -1057,6 +1073,120 @@ linearize_mbuf(linear_t *linear, struct rte_mbuf *buf)
        return size;
 }
 
+/**
+ * Handle scattered buffers for mlx4_tx_burst().
+ *
+ * @param txq
+ *   TX queue structure.
+ * @param segs
+ *   Number of segments in buf.
+ * @param elt
+ *   TX queue element to fill.
+ * @param[in] buf
+ *   Buffer to process.
+ * @param elts_head
+ *   Index of the linear buffer to use if necessary (normally txq->elts_head).
+ * @param[out] sges
+ *   Array filled with SGEs on success.
+ *
+ * @return
+ *   A structure containing the processed packet size in bytes and the
+ *   number of SGEs. Both fields are set to (unsigned int)-1 in case of
+ *   failure.
+ */
+static struct tx_burst_sg_ret {
+       unsigned int length;
+       unsigned int num;
+}
+tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt,
+           struct rte_mbuf *buf, unsigned int elts_head,
+           struct ibv_sge (*sges)[MLX4_PMD_SGE_WR_N])
+{
+       unsigned int sent_size = 0;
+       unsigned int j;
+       int linearize = 0;
+
+       /* When there are too many segments, extra segments are
+        * linearized in the last SGE. */
+       if (unlikely(segs > elemof(*sges))) {
+               segs = (elemof(*sges) - 1);
+               linearize = 1;
+       }
+       /* Update element. */
+       elt->buf = buf;
+       /* Register segments as SGEs. */
+       for (j = 0; (j != segs); ++j) {
+               struct ibv_sge *sge = &(*sges)[j];
+               uint32_t lkey;
+
+               /* Retrieve Memory Region key for this memory pool. */
+               lkey = txq_mp2mr(txq, buf->pool);
+               if (unlikely(lkey == (uint32_t)-1)) {
+                       /* MR does not exist. */
+                       DEBUG("%p: unable to get MP <-> MR association",
+                             (void *)txq);
+                       /* Clean up TX element. */
+                       elt->buf = NULL;
+                       goto stop;
+               }
+               /* Update SGE. */
+               sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+               if (txq->priv->vf)
+                       rte_prefetch0((volatile void *)
+                                     (uintptr_t)sge->addr);
+               sge->length = DATA_LEN(buf);
+               sge->lkey = lkey;
+               sent_size += sge->length;
+               buf = NEXT(buf);
+       }
+       /* If buf is not NULL here and is not going to be linearized,
+        * nb_segs is not valid. */
+       assert(j == segs);
+       assert((buf == NULL) || (linearize));
+       /* Linearize extra segments. */
+       if (linearize) {
+               struct ibv_sge *sge = &(*sges)[segs];
+               linear_t *linear = &(*txq->elts_linear)[elts_head];
+               unsigned int size = linearize_mbuf(linear, buf);
+
+               assert(segs == (elemof(*sges) - 1));
+               if (size == 0) {
+                       /* Invalid packet. */
+                       DEBUG("%p: packet too large to be linearized.",
+                             (void *)txq);
+                       /* Clean up TX element. */
+                       elt->buf = NULL;
+                       goto stop;
+               }
+               /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately. */
+               if (elemof(*sges) == 1) {
+                       do {
+                               struct rte_mbuf *next = NEXT(buf);
+
+                               rte_pktmbuf_free_seg(buf);
+                               buf = next;
+                       } while (buf != NULL);
+                       elt->buf = NULL;
+               }
+               /* Update SGE. */
+               sge->addr = (uintptr_t)&(*linear)[0];
+               sge->length = size;
+               sge->lkey = txq->mr_linear->lkey;
+               sent_size += size;
+       }
+       return (struct tx_burst_sg_ret){
+               .length = sent_size,
+               .num = segs,
+       };
+stop:
+       return (struct tx_burst_sg_ret){
+               .length = -1,
+               .num = -1,
+       };
+}
+
+#endif /* MLX4_PMD_SGE_WR_N > 1 */
+
 /**
  * DPDK callback for TX.
  *
@@ -1074,8 +1204,6 @@ static uint16_t
 mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
        struct txq *txq = (struct txq *)dpdk_txq;
-       struct ibv_send_wr head;
-       struct ibv_send_wr **wr_next = &head.next;
        unsigned int elts_head = txq->elts_head;
        const unsigned int elts_tail = txq->elts_tail;
        const unsigned int elts_n = txq->elts_n;
@@ -1100,21 +1228,19 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                max = pkts_n;
        for (i = 0; (i != max); ++i) {
                struct rte_mbuf *buf = pkts[i];
+               unsigned int elts_head_next =
+                       (((elts_head + 1) == elts_n) ? 0 : elts_head + 1);
+               struct txq_elt *elt_next = &(*txq->elts)[elts_head_next];
                struct txq_elt *elt = &(*txq->elts)[elts_head];
-               struct ibv_send_wr *wr = &elt->wr;
                unsigned int segs = NB_SEGS(buf);
 #ifdef MLX4_PMD_SOFT_COUNTERS
                unsigned int sent_size = 0;
 #endif
-               unsigned int j;
-               int linearize = 0;
                uint32_t send_flags = 0;
 
                /* Clean up old buffer. */
-               if (likely(WR_ID(wr->wr_id).offset != 0)) {
-                       struct rte_mbuf *tmp = (void *)
-                               ((uintptr_t)elt->sges[0].addr -
-                                WR_ID(wr->wr_id).offset);
+               if (likely(elt->buf != NULL)) {
+                       struct rte_mbuf *tmp = elt->buf;
 
                        /* Faster than rte_pktmbuf_free(). */
                        do {
@@ -1124,43 +1250,31 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                tmp = next;
                        } while (tmp != NULL);
                }
-#ifndef NDEBUG
-               /* For assert(). */
-               WR_ID(wr->wr_id).offset = 0;
-               for (j = 0; ((int)j < wr->num_sge); ++j) {
-                       elt->sges[j].addr = 0;
-                       elt->sges[j].length = 0;
-                       elt->sges[j].lkey = 0;
+               /* Request TX completion. */
+               if (unlikely(--elts_comp_cd == 0)) {
+                       elts_comp_cd = txq->elts_comp_cd_init;
+                       ++elts_comp;
+                       send_flags |= IBV_EXP_QP_BURST_SIGNALED;
                }
-               wr->next = NULL;
-               wr->num_sge = 0;
-#endif
-               /* Sanity checks, most of which are only relevant with
-                * debugging enabled. */
-               assert(WR_ID(wr->wr_id).id == elts_head);
-               assert(WR_ID(wr->wr_id).offset == 0);
-               assert(wr->next == NULL);
-               assert(wr->sg_list == &elt->sges[0]);
-               assert(wr->num_sge == 0);
-               assert(wr->opcode == IBV_WR_SEND);
-               /* When there are too many segments, extra segments are
-                * linearized in the last SGE. */
-               if (unlikely(segs > elemof(elt->sges))) {
-                       segs = (elemof(elt->sges) - 1);
-                       linearize = 1;
+               /* Should we enable HW CKSUM offload? */
+               if (buf->ol_flags &
+                   (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
+                       send_flags |= IBV_EXP_QP_BURST_IP_CSUM;
+                       /* HW does not support checksum offloads at arbitrary
+                        * offsets but automatically recognizes the packet
+                        * type. For inner L3/L4 checksums, only VXLAN (UDP)
+                        * tunnels are currently supported. */
+                       if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type))
+                               send_flags |= IBV_EXP_QP_BURST_TUNNEL;
                }
-               /* Set WR fields. */
-               assert((rte_pktmbuf_mtod(buf, uintptr_t) -
-                       (uintptr_t)buf) <= 0xffff);
-               WR_ID(wr->wr_id).offset =
-                       (rte_pktmbuf_mtod(buf, uintptr_t) -
-                        (uintptr_t)buf);
-               wr->num_sge = segs;
-               /* Register segments as SGEs. */
-               for (j = 0; (j != segs); ++j) {
-                       struct ibv_sge *sge = &elt->sges[j];
+               if (likely(segs == 1)) {
+                       uintptr_t addr;
+                       uint32_t length;
                        uint32_t lkey;
 
+                       /* Retrieve buffer information. */
+                       addr = rte_pktmbuf_mtod(buf, uintptr_t);
+                       length = DATA_LEN(buf);
                        /* Retrieve Memory Region key for this memory pool. */
                        lkey = txq_mp2mr(txq, buf->pool);
                        if (unlikely(lkey == (uint32_t)-1)) {
@@ -1168,127 +1282,68 @@ mlx4_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                DEBUG("%p: unable to get MP <-> MR"
                                      " association", (void *)txq);
                                /* Clean up TX element. */
-                               WR_ID(elt->wr.wr_id).offset = 0;
-#ifndef NDEBUG
-                               /* For assert(). */
-                               while (j) {
-                                       --j;
-                                       --sge;
-                                       sge->addr = 0;
-                                       sge->length = 0;
-                                       sge->lkey = 0;
-                               }
-                               wr->num_sge = 0;
-#endif
+                               elt->buf = NULL;
                                goto stop;
                        }
-                       /* Sanity checks, only relevant with debugging
-                        * enabled. */
-                       assert(sge->addr == 0);
-                       assert(sge->length == 0);
-                       assert(sge->lkey == 0);
-                       /* Update SGE. */
-                       sge->addr = rte_pktmbuf_mtod(buf, uintptr_t);
+                       /* Update element. */
+                       elt->buf = buf;
                        if (txq->priv->vf)
                                rte_prefetch0((volatile void *)
-                                       (uintptr_t)sge->addr);
-                       sge->length = DATA_LEN(buf);
-                       sge->lkey = lkey;
-#ifdef MLX4_PMD_SOFT_COUNTERS
-                       sent_size += sge->length;
-#endif
-                       buf = NEXT(buf);
-               }
-               /* If buf is not NULL here and is not going to be linearized,
-                * nb_segs is not valid. */
-               assert(j == segs);
-               assert((buf == NULL) || (linearize));
-               /* Linearize extra segments. */
-               if (linearize) {
-                       struct ibv_sge *sge = &elt->sges[segs];
-                       linear_t *linear = &(*txq->elts_linear)[elts_head];
-                       unsigned int size = linearize_mbuf(linear, buf);
-
-                       assert(segs == (elemof(elt->sges) - 1));
-                       if (size == 0) {
-                               /* Invalid packet. */
-                               DEBUG("%p: packet too large to be linearized.",
-                                     (void *)txq);
-                               /* Clean up TX element. */
-                               WR_ID(elt->wr.wr_id).offset = 0;
-#ifndef NDEBUG
-                               /* For assert(). */
-                               while (j) {
-                                       --j;
-                                       --sge;
-                                       sge->addr = 0;
-                                       sge->length = 0;
-                                       sge->lkey = 0;
-                               }
-                               wr->num_sge = 0;
+                                             (uintptr_t)addr);
+                       RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+                       /* Put packet into send queue. */
+#if MLX4_PMD_MAX_INLINE > 0
+                       if (length <= txq->max_inline)
+                               err = txq->if_qp->send_pending_inline
+                                       (txq->qp,
+                                        (void *)addr,
+                                        length,
+                                        send_flags);
+                       else
 #endif
+                               err = txq->if_qp->send_pending
+                                       (txq->qp,
+                                        addr,
+                                        length,
+                                        lkey,
+                                        send_flags);
+                       if (unlikely(err))
                                goto stop;
-                       }
-                       /* If MLX4_PMD_SGE_WR_N is 1, free mbuf immediately
-                        * and clear offset from WR ID. */
-                       if (elemof(elt->sges) == 1) {
-                               do {
-                                       struct rte_mbuf *next = NEXT(buf);
+#ifdef MLX4_PMD_SOFT_COUNTERS
+                       sent_size += length;
+#endif
+               } else {
+#if MLX4_PMD_SGE_WR_N > 1
+                       struct ibv_sge sges[MLX4_PMD_SGE_WR_N];
+                       struct tx_burst_sg_ret ret;
 
-                                       rte_pktmbuf_free_seg(buf);
-                                       buf = next;
-                               } while (buf != NULL);
-                               WR_ID(wr->wr_id).offset = 0;
-                       }
-                       /* Set WR fields and fill SGE with linear buffer. */
-                       ++wr->num_sge;
-                       /* Sanity checks, only relevant with debugging
-                        * enabled. */
-                       assert(sge->addr == 0);
-                       assert(sge->length == 0);
-                       assert(sge->lkey == 0);
-                       /* Update SGE. */
-                       sge->addr = (uintptr_t)&(*linear)[0];
-                       sge->length = size;
-                       sge->lkey = txq->mr_linear->lkey;
+                       ret = tx_burst_sg(txq, segs, elt, buf, elts_head,
+                                         &sges);
+                       if (ret.length == (unsigned int)-1)
+                               goto stop;
+                       RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
+                       /* Put SG list into send queue. */
+                       err = txq->if_qp->send_pending_sg_list
+                               (txq->qp,
+                                sges,
+                                ret.num,
+                                send_flags);
+                       if (unlikely(err))
+                               goto stop;
 #ifdef MLX4_PMD_SOFT_COUNTERS
-                       sent_size += size;
+                       sent_size += ret.length;
 #endif
+#else /* MLX4_PMD_SGE_WR_N > 1 */
+                       DEBUG("%p: TX scattered buffers support not"
+                             " compiled in", (void *)txq);
+                       goto stop;
+#endif /* MLX4_PMD_SGE_WR_N > 1 */
                }
-               /* Link WRs together for ibv_post_send(). */
-               *wr_next = wr;
-               wr_next = &wr->next;
-               assert(wr->send_flags == 0);
-               /* Request TX completion. */
-               if (unlikely(--elts_comp_cd == 0)) {
-                       elts_comp_cd = txq->elts_comp_cd_init;
-                       ++elts_comp;
-                       send_flags |= IBV_EXP_QP_BURST_SIGNALED;
-               }
-               if (++elts_head >= elts_n)
-                       elts_head = 0;
+               elts_head = elts_head_next;
 #ifdef MLX4_PMD_SOFT_COUNTERS
                /* Increment sent bytes counter. */
                txq->stats.obytes += sent_size;
 #endif
-               /* Put SG list into send queue and ask for completion event. */
-#if MLX4_PMD_MAX_INLINE > 0
-               if ((segs == 1) &&
-                   (elt->sges[0].length <= txq->max_inline))
-                       err = txq->if_qp->send_pending_inline
-                               (txq->qp,
-                                (void *)(uintptr_t)elt->sges[0].addr,
-                                elt->sges[0].length,
-                                send_flags);
-               else
-#endif
-                       err = txq->if_qp->send_pending_sg_list
-                               (txq->qp,
-                                elt->sges,
-                                segs,
-                                send_flags);
-               if (unlikely(err))
-                       goto stop;
        }
 stop:
        /* Take a shortcut if nothing must be sent. */
@@ -1298,7 +1353,6 @@ stop:
        /* Increment sent packets counter. */
        txq->stats.opackets += i;
 #endif
-       *wr_next = NULL;
        /* Ring QP doorbell. */
        err = txq->if_qp->send_flush(txq->qp);
        if (unlikely(err)) {
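
On the application side, the checksum request added to mlx4_tx_burst() above is driven purely by mbuf ol_flags; per the comment in that hunk, the hardware recognizes the packet type by itself, so no header offsets have to be supplied. A minimal sketch, assuming an already-built IPv4/UDP mbuf and hypothetical port_id/queue_id values:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Hypothetical helper: request IP and UDP checksum offload for one mbuf and
 * hand it to the PMD, which maps these flags to IBV_EXP_QP_BURST_IP_CSUM. */
static uint16_t
send_with_csum_offload(uint8_t port_id, uint16_t queue_id, struct rte_mbuf *m)
{
        m->ol_flags |= PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
        return rte_eth_tx_burst(port_id, queue_id, &m, 1);
}
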
@@ -1341,7 +1395,9 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
        };
        union {
                struct ibv_exp_query_intf_params params;
-               struct ibv_qp_init_attr init;
+               struct ibv_exp_qp_init_attr init;
+               struct ibv_exp_res_domain_init_attr rd;
+               struct ibv_exp_cq_init_attr cq;
                struct ibv_exp_qp_attr mod;
        } attr;
        enum ibv_exp_query_intf_status status;
@@ -1355,7 +1411,24 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
        }
        desc /= MLX4_PMD_SGE_WR_N;
        /* MRs will be registered in mp2mr[] later. */
-       tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+       attr.rd = (struct ibv_exp_res_domain_init_attr){
+               .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+                             IBV_EXP_RES_DOMAIN_MSG_MODEL),
+               .thread_model = IBV_EXP_THREAD_SINGLE,
+               .msg_model = IBV_EXP_MSG_HIGH_BW,
+       };
+       tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+       if (tmpl.rd == NULL) {
+               ret = ENOMEM;
+               ERROR("%p: RD creation failure: %s",
+                     (void *)dev, strerror(ret));
+               goto error;
+       }
+       attr.cq = (struct ibv_exp_cq_init_attr){
+               .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+               .res_domain = tmpl.rd,
+       };
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
@@ -1366,7 +1439,7 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
              priv->device_attr.max_qp_wr);
        DEBUG("priv->device_attr.max_sge is %d",
              priv->device_attr.max_sge);
-       attr.init = (struct ibv_qp_init_attr){
+       attr.init = (struct ibv_exp_qp_init_attr){
                /* CQ to be associated with the send queue. */
                .send_cq = tmpl.cq,
                /* CQ to be associated with the receive queue. */
@@ -1388,9 +1461,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                .qp_type = IBV_QPT_RAW_PACKET,
                /* Do *NOT* enable this, completion events are managed per
                 * TX burst. */
-               .sq_sig_all = 0
+               .sq_sig_all = 0,
+               .pd = priv->pd,
+               .res_domain = tmpl.rd,
+               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
        };
-       tmpl.qp = ibv_create_qp(priv->pd, &attr.init);
+       tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
@@ -1451,6 +1528,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc,
                .intf_scope = IBV_EXP_INTF_GLOBAL,
                .intf = IBV_EXP_INTF_QP_BURST,
                .obj = tmpl.qp,
+#ifdef HAVE_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK
+               /* MC loopback must be disabled when not using a VF. */
+               .family_flags =
+                       (!priv->vf ?
+                        IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK :
+                        0),
+#endif
        };
        tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status);
        if (tmpl.if_qp == NULL) {
@@ -2379,11 +2463,95 @@ rxq_cleanup(struct rxq *rxq)
        }
        if (rxq->cq != NULL)
                claim_zero(ibv_destroy_cq(rxq->cq));
+       if (rxq->rd != NULL) {
+               struct ibv_exp_destroy_res_domain_attr attr = {
+                       .comp_mask = 0,
+               };
+
+               assert(rxq->priv != NULL);
+               assert(rxq->priv->ctx != NULL);
+               claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx,
+                                                     rxq->rd,
+                                                     &attr));
+       }
        if (rxq->mr != NULL)
                claim_zero(ibv_dereg_mr(rxq->mr));
        memset(rxq, 0, sizeof(*rxq));
 }
 
+/**
+ * Translate RX completion flags to packet type.
+ *
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Packet type for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_pkt_type(uint32_t flags)
+{
+       uint32_t pkt_type;
+
+       if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET)
+               pkt_type =
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_OUTER_IPV6_PACKET, RTE_PTYPE_L3_IPV6) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_INNER_L3_IPV4) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_INNER_L3_IPV6);
+       else
+               pkt_type =
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV4_PACKET, RTE_PTYPE_L3_IPV4) |
+                       TRANSPOSE(flags,
+                                 IBV_EXP_CQ_RX_IPV6_PACKET, RTE_PTYPE_L3_IPV6);
+       return pkt_type;
+}
+
+/**
+ * Translate RX completion flags to offload flags.
+ *
+ * @param[in] rxq
+ *   Pointer to RX queue structure.
+ * @param flags
+ *   RX completion flags returned by poll_length_flags().
+ *
+ * @return
+ *   Offload flags (ol_flags) for struct rte_mbuf.
+ */
+static inline uint32_t
+rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags)
+{
+       uint32_t ol_flags = 0;
+
+       if (rxq->csum)
+               ol_flags |=
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_IP_CSUM_OK,
+                                 PKT_RX_IP_CKSUM_BAD) |
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK,
+                                 PKT_RX_L4_CKSUM_BAD);
+       /*
+        * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place
+        * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional
+        * (its value is 0).
+        */
+       if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun))
+               ol_flags |=
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK,
+                                 PKT_RX_IP_CKSUM_BAD) |
+                       TRANSPOSE(~flags,
+                                 IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK,
+                                 PKT_RX_L4_CKSUM_BAD);
+       return ol_flags;
+}
+
 static uint16_t
 mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n);
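
Taken together, the two helpers above split the completion flags between the mbuf packet_type and ol_flags fields. Note that the verbs interface reports *_CSUM_OK bits while the mbuf API carries *_CKSUM_BAD bits, which is why the flags are inverted before being transposed. A small sketch of the expected results, assuming a queue with rxq->csum enabled and the definitions from this file:

/* Illustration only: a plain (non-tunnelled) IPv4 completion whose checksum
 * bits are not set. */
static void
rx_flags_example(struct rxq *rxq)
{
        uint32_t flags = IBV_EXP_CQ_RX_IPV4_PACKET;

        /* No tunnel bit, so the L3 type is reported directly. */
        assert(rxq_cq_to_pkt_type(flags) == RTE_PTYPE_L3_IPV4);
        /* The *_CSUM_OK bits are clear, hence set in ~flags, so both "bad
         * checksum" flags are transposed in (rxq->csum assumed enabled). */
        assert(rxq_cq_to_ol_flags(rxq, flags) ==
               (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD));
}
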
 
@@ -2428,6 +2596,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                struct rte_mbuf **pkt_buf_next = &pkt_buf;
                unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM;
                unsigned int j = 0;
+               uint32_t flags;
 
                /* Sanity checks. */
 #ifdef NDEBUG
@@ -2438,7 +2607,8 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                assert(wr->num_sge == elemof(elt->sges));
                assert(elts_head < rxq->elts_n);
                assert(rxq->elts_head < rxq->elts_n);
-               ret = rxq->if_cq->poll_length(rxq->cq, NULL, NULL);
+               ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
+                                                   &flags);
                if (unlikely(ret < 0)) {
                        struct ibv_wc wc;
                        int wcs_n;
@@ -2564,7 +2734,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                NB_SEGS(pkt_buf) = j;
                PORT(pkt_buf) = rxq->port_id;
                PKT_LEN(pkt_buf) = pkt_buf_len;
-               pkt_buf->ol_flags = 0;
+               pkt_buf->packet_type = rxq_cq_to_pkt_type(flags);
 
                /* Return packet. */
                *(pkts++) = pkt_buf;
@@ -2641,6 +2811,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                struct rte_mbuf *seg = (void *)((uintptr_t)elt->sge.addr -
                        WR_ID(wr_id).offset);
                struct rte_mbuf *rep;
+               uint32_t flags;
 
                /* Sanity checks. */
                assert(WR_ID(wr_id).id < rxq->elts_n);
@@ -2648,7 +2819,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                assert(wr->num_sge == 1);
                assert(elts_head < rxq->elts_n);
                assert(rxq->elts_head < rxq->elts_n);
-               ret = rxq->if_cq->poll_length(rxq->cq, NULL, NULL);
+               ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
+                                                   &flags);
                if (unlikely(ret < 0)) {
                        struct ibv_wc wc;
                        int wcs_n;
@@ -2722,7 +2894,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                NEXT(seg) = NULL;
                PKT_LEN(seg) = len;
                DATA_LEN(seg) = len;
-               seg->ol_flags = 0;
+               seg->packet_type = rxq_cq_to_pkt_type(flags);
+               seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags);
 
                /* Return packet. */
                *(pkts++) = seg;
@@ -2773,7 +2946,8 @@ repost:
  *   QP pointer or NULL in case of error.
  */
 static struct ibv_qp *
-rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
+rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
+            struct ibv_exp_res_domain *rd)
 {
        struct ibv_exp_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
@@ -2792,8 +2966,10 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
                                         MLX4_PMD_SGE_WR_N),
                },
                .qp_type = IBV_QPT_RAW_PACKET,
-               .comp_mask = IBV_EXP_QP_INIT_ATTR_PD,
+               .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN),
                .pd = priv->pd,
+               .res_domain = rd,
        };
 
 #ifdef INLINE_RECV
@@ -2823,7 +2999,7 @@ rxq_setup_qp(struct priv *priv, struct ibv_cq *cq, uint16_t desc)
  */
 static struct ibv_qp *
 rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
-                int parent)
+                int parent, struct ibv_exp_res_domain *rd)
 {
        struct ibv_exp_qp_init_attr attr = {
                /* CQ to be associated with the send queue. */
@@ -2843,8 +3019,10 @@ rxq_setup_qp_rss(struct priv *priv, struct ibv_cq *cq, uint16_t desc,
                },
                .qp_type = IBV_QPT_RAW_PACKET,
                .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD |
+                             IBV_EXP_QP_INIT_ATTR_RES_DOMAIN |
                              IBV_EXP_QP_INIT_ATTR_QPG),
-               .pd = priv->pd
+               .pd = priv->pd,
+               .res_domain = rd,
        };
 
 #ifdef INLINE_RECV
@@ -2905,6 +3083,15 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq)
        /* Number of descriptors and mbufs currently allocated. */
        desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1));
        mbuf_n = desc_n;
+       /* Toggle RX checksum offload if hardware supports it. */
+       if (priv->hw_csum) {
+               tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+               rxq->csum = tmpl.csum;
+       }
+       if (priv->hw_csum_l2tun) {
+               tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+               rxq->csum_l2tun = tmpl.csum_l2tun;
+       }
        /* Enable scattered packets support for this queue if necessary. */
        if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
            (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -3091,6 +3278,8 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
        struct ibv_exp_qp_attr mod;
        union {
                struct ibv_exp_query_intf_params params;
+               struct ibv_exp_cq_init_attr cq;
+               struct ibv_exp_res_domain_init_attr rd;
        } attr;
        enum ibv_exp_query_intf_status status;
        struct ibv_recv_wr *bad_wr;
@@ -3126,6 +3315,11 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
                rte_pktmbuf_tailroom(buf)) == tmpl.mb_len);
        assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM);
        rte_pktmbuf_free(buf);
+       /* Toggle RX checksum offload if hardware supports it. */
+       if (priv->hw_csum)
+               tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+       if (priv->hw_csum_l2tun)
+               tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum;
        /* Enable scattered packets support for this queue if necessary. */
        if ((dev->data->dev_conf.rxmode.jumbo_frame) &&
            (dev->data->dev_conf.rxmode.max_rx_pkt_len >
@@ -3148,7 +3342,24 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc,
                goto error;
        }
 skip_mr:
-       tmpl.cq = ibv_create_cq(priv->ctx, desc, NULL, NULL, 0);
+       attr.rd = (struct ibv_exp_res_domain_init_attr){
+               .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL |
+                             IBV_EXP_RES_DOMAIN_MSG_MODEL),
+               .thread_model = IBV_EXP_THREAD_SINGLE,
+               .msg_model = IBV_EXP_MSG_HIGH_BW,
+       };
+       tmpl.rd = ibv_exp_create_res_domain(priv->ctx, &attr.rd);
+       if (tmpl.rd == NULL) {
+               ret = ENOMEM;
+               ERROR("%p: RD creation failure: %s",
+                     (void *)dev, strerror(ret));
+               goto error;
+       }
+       attr.cq = (struct ibv_exp_cq_init_attr){
+               .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN,
+               .res_domain = tmpl.rd,
+       };
+       tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq);
        if (tmpl.cq == NULL) {
                ret = ENOMEM;
                ERROR("%p: CQ creation failure: %s",
@@ -3161,10 +3372,11 @@ skip_mr:
              priv->device_attr.max_sge);
 #ifdef RSS_SUPPORT
        if (priv->rss)
-               tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent);
+               tmpl.qp = rxq_setup_qp_rss(priv, tmpl.cq, desc, parent,
+                                          tmpl.rd);
        else
 #endif /* RSS_SUPPORT */
-               tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc);
+               tmpl.qp = rxq_setup_qp(priv, tmpl.cq, desc, tmpl.rd);
        if (tmpl.qp == NULL) {
                ret = (errno ? errno : EINVAL);
                ERROR("%p: QP creation failure: %s",
@@ -3623,6 +3835,18 @@ mlx4_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
        info->max_rx_queues = max;
        info->max_tx_queues = max;
        info->max_mac_addrs = elemof(priv->mac);
+       info->rx_offload_capa =
+               (priv->hw_csum ?
+                (DEV_RX_OFFLOAD_IPV4_CKSUM |
+                 DEV_RX_OFFLOAD_UDP_CKSUM |
+                 DEV_RX_OFFLOAD_TCP_CKSUM) :
+                0);
+       info->tx_offload_capa =
+               (priv->hw_csum ?
+                (DEV_TX_OFFLOAD_IPV4_CKSUM |
+                 DEV_TX_OFFLOAD_UDP_CKSUM |
+                 DEV_TX_OFFLOAD_TCP_CKSUM) :
+                0);
        priv_unlock(priv);
 }
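
With rx_offload_capa and tx_offload_capa filled in, applications can discover these capabilities through the standard ethdev API and enable the RX checksum path that rxq_setup() reads from rxmode.hw_ip_checksum. A minimal sketch using the DPDK 2.x configuration calls (port_id and the single-queue setup are illustrative):

#include <string.h>
#include <rte_ethdev.h>

/* Hypothetical helper: enable HW RX checksum on a port when the PMD
 * advertises it, as mlx4 now does whenever priv->hw_csum is set. */
static int
configure_rx_csum(uint8_t port_id)
{
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        rte_eth_dev_info_get(port_id, &dev_info);
        if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
                conf.rxmode.hw_ip_checksum = 1; /* picked up by rxq_setup() */
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
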
 
@@ -4341,6 +4565,8 @@ static const struct eth_dev_ops mlx4_dev_ops = {
        .mac_addr_remove = mlx4_mac_addr_remove,
        .mac_addr_add = mlx4_mac_addr_add,
        .mtu_set = mlx4_dev_set_mtu,
+       .udp_tunnel_add = NULL,
+       .udp_tunnel_del = NULL,
        .fdir_add_signature_filter = NULL,
        .fdir_update_signature_filter = NULL,
        .fdir_remove_signature_filter = NULL,
@@ -4663,6 +4889,19 @@ mlx4_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
                              exp_device_attr.max_rss_tbl_sz);
 #endif /* RSS_SUPPORT */
 
+               priv->hw_csum =
+                       ((exp_device_attr.exp_device_cap_flags &
+                         IBV_EXP_DEVICE_RX_CSUM_TCP_UDP_PKT) &&
+                        (exp_device_attr.exp_device_cap_flags &
+                         IBV_EXP_DEVICE_RX_CSUM_IP_PKT));
+               DEBUG("checksum offloading is %ssupported",
+                     (priv->hw_csum ? "" : "not "));
+
+               priv->hw_csum_l2tun = !!(exp_device_attr.exp_device_cap_flags &
+                                        IBV_EXP_DEVICE_VXLAN_SUPPORT);
+               DEBUG("L2 tunnel checksum offloads are %ssupported",
+                     (priv->hw_csum_l2tun ? "" : "not "));
+
 #ifdef INLINE_RECV
                priv->inl_recv_size = mlx4_getenv_int("MLX4_INLINE_RECV_SIZE");