CFLAGS += -I.
CFLAGS += -D_XOPEN_SOURCE=600
CFLAGS += $(WERROR_FLAGS)
+# A few warnings cannot be avoided in external headers.
+CFLAGS += -Wno-strict-prototypes
LDLIBS += -libverbs
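For context, -Wno-strict-prototypes is needed because the queue structures below gain Verbs function pointers declared without a prototype (e.g. "int32_t (*poll)();"), so a single member can point at either the 4-argument or the 5-argument CQ poll function depending on HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS. A minimal standalone sketch of that trick, with hypothetical function names; it relies on the calling convention ignoring surplus trailing arguments, which is what the driver assumes on its supported targets:

#include <stdio.h>

static int poll_flags(void *cq, unsigned int *flags)
{
	(void)cq;
	*flags = 0x1;
	return 1;
}

static int poll_flags_cvlan(void *cq, unsigned int *flags,
			    unsigned int *vlan_tci)
{
	(void)cq;
	*flags = 0x3;
	*vlan_tci = 42;
	return 1;
}

int main(void)
{
	int (*poll)(); /* this declaration trips -Wstrict-prototypes */
	unsigned int flags = 0;
	unsigned int vlan_tci = 0;

	poll = poll_flags; /* 2-argument variant */
	poll(NULL, &flags, &vlan_tci); /* surplus argument is ignored */
	poll = poll_flags_cvlan; /* 3-argument variant */
	poll(NULL, &flags, &vlan_tci);
	printf("flags=%#x vlan=%u\n", flags, vlan_tci);
	return 0;
}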
rxq_free_elts_sp(rxq);
else
rxq_free_elts(rxq);
+ rxq->poll = NULL;
+ rxq->recv = NULL;
if (rxq->if_wq != NULL) {
assert(rxq->priv != NULL);
assert(rxq->priv->ctx != NULL);
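Clearing poll and recv here is defensive: after cleanup the CQ/WQ interface tables are released, and a stale call through a cached pointer would jump through freed memory, whereas a NULL call at least faults immediately; the TX queue gets the same treatment further down. A tiny sketch of the pattern, with hypothetical types:

#include <assert.h>
#include <stddef.h>

/* Hypothetical trimmed queue: cached datapath function pointer. */
struct queue {
	int (*recv)(); /* NULL once the queue is torn down */
};

static void queue_cleanup(struct queue *q)
{
	/* Leave no dangling pointer into the released interface table;
	 * a late caller now traps instead of executing freed memory. */
	q->recv = NULL;
}

int main(void)
{
	struct queue q;

	queue_cleanup(&q);
	assert(q.recv == NULL); /* the datapath must not run past this */
	return 0;
}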
err = EIO;
goto error;
}
+ if (tmpl.sp)
+ tmpl.recv = tmpl.if_wq->recv_sg_list;
+ else
+ tmpl.recv = tmpl.if_wq->recv_burst;
error:
*rxq = tmpl;
assert(err >= 0);
*rxq = tmpl;
DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl);
assert(ret == 0);
+ /* Assign datapath functions to the queue. */
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ rxq->poll = rxq->if_cq->poll_length_flags_cvlan;
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ rxq->poll = rxq->if_cq->poll_length_flags;
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ if (rxq->sp)
+ rxq->recv = rxq->if_wq->recv_sg_list;
+ else
+ rxq->recv = rxq->if_wq->recv_burst;
return 0;
error:
rxq_cleanup(&tmpl);
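The block above is the core of the change on the RX side: the Verbs family functions are resolved once at queue setup and stored in struct rxq, so the per-burst code no longer chases if_cq/if_wq. A condensed, self-contained sketch of that binding step, with hypothetical stand-ins for the Verbs interface structs:

#include <stdio.h>

/* Hypothetical stand-ins for ibv_exp_cq_family / ibv_exp_wq_family. */
struct cq_family { int (*poll_length_flags)(); int (*poll_length_flags_cvlan)(); };
struct wq_family { int (*recv_burst)(); int (*recv_sg_list)(); };

struct rxq_sketch {
	int (*poll)(); /* hot: bound once, called on every burst */
	int (*recv)();
	int sp; /* scattered packets enabled */
	struct cq_family *if_cq; /* cold: consulted at setup only */
	struct wq_family *if_wq;
};

static void rxq_bind(struct rxq_sketch *rxq, int have_cvlan)
{
	/* The compile-time #ifdef above becomes a runtime flag here,
	 * purely for the sake of the sketch. */
	rxq->poll = have_cvlan ? rxq->if_cq->poll_length_flags_cvlan
			       : rxq->if_cq->poll_length_flags;
	rxq->recv = rxq->sp ? rxq->if_wq->recv_sg_list
			    : rxq->if_wq->recv_burst;
}

static int dummy(void) { return 0; }

int main(void)
{
	struct cq_family cq = { dummy, dummy };
	struct wq_family wq = { dummy, dummy };
	struct rxq_sketch rxq = { .sp = 0, .if_cq = &cq, .if_wq = &wq };

	rxq_bind(&rxq, 1);
	printf("poll() -> %d, recv() -> %d\n", rxq.poll(), rxq.recv());
	return 0;
}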
DEBUG("%p: processing %u work requests completions",
(void *)txq, elts_comp);
#endif
- wcs_n = txq->if_cq->poll_cnt(txq->cq, elts_comp);
+ wcs_n = txq->poll_cnt(txq->cq, elts_comp);
if (unlikely(wcs_n == 0))
return 0;
if (unlikely(wcs_n < 0)) {
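The saving here is small but on the hottest path: txq->if_cq->poll_cnt(...) performs two dependent loads (fetch if_cq, then fetch poll_cnt) before the indirect call, while the cached txq->poll_cnt(...) performs one, from a cache line the burst function already touches. A compilable sketch of the before/after shape, with hypothetical types:

struct cq_if { int (*poll_cnt)(void *cq, unsigned int max); };

struct txq_old { struct cq_if *if_cq; void *cq; };
struct txq_new { int (*poll_cnt)(void *cq, unsigned int max); void *cq; };

static int complete_old(struct txq_old *t, unsigned int n)
{
	/* load t->if_cq, load if_cq->poll_cnt, then call */
	return t->if_cq->poll_cnt(t->cq, n);
}

static int complete_new(struct txq_new *t, unsigned int n)
{
	/* load t->poll_cnt, then call */
	return t->poll_cnt(t->cq, n);
}

static int fake_poll_cnt(void *cq, unsigned int max)
{
	(void)cq;
	return (int)max;
}

int main(void)
{
	struct cq_if cqi = { fake_poll_cnt };
	struct txq_old o = { &cqi, 0 };
	struct txq_new n = { fake_poll_cnt, 0 };

	return complete_old(&o, 4) - complete_new(&n, 4); /* exits 0 */
}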
/* Put packet into send queue. */
#if MLX5_PMD_MAX_INLINE > 0
if (length <= txq->max_inline)
- err = txq->if_qp->send_pending_inline
+ err = txq->send_pending_inline
(txq->qp,
(void *)addr,
length,
send_flags);
else
#endif
- err = txq->if_qp->send_pending
+ err = txq->send_pending
(txq->qp,
addr,
length,
goto stop;
RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf);
/* Put SG list into send queue. */
- err = txq->if_qp->send_pending_sg_list
+ err = txq->send_pending_sg_list
(txq->qp,
sges,
ret.num,
txq->stats.opackets += i;
#endif
/* Ring QP doorbell. */
- err = txq->if_qp->send_flush(txq->qp);
+ err = txq->send_flush(txq->qp);
if (unlikely(err)) {
/* A nonzero value is not supposed to be returned.
* Nothing can be done about it. */
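Same substitution on the posting side; note the shape of the surrounding loop: every packet is queued with one of the send_pending*() variants, and the doorbell (send_flush) rings once per burst, amortizing the MMIO write over the whole batch. A schematic of that structure with hypothetical types:

#include <stdint.h>
#include <stdio.h>

struct pkt { uint64_t addr; uint32_t len; };

struct txq_sketch {
	int (*send_pending)(void *qp, uint64_t addr, uint32_t len);
	int (*send_flush)(void *qp);
	void *qp;
};

static unsigned int
tx_burst_sketch(struct txq_sketch *txq, struct pkt *pkts, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; ++i)
		if (txq->send_pending(txq->qp, pkts[i].addr, pkts[i].len))
			break; /* descriptor ring full or error */
	if (i > 0)
		txq->send_flush(txq->qp); /* single doorbell per burst */
	return i;
}

static int fake_pending(void *qp, uint64_t addr, uint32_t len)
{ (void)qp; (void)addr; (void)len; return 0; }
static int fake_flush(void *qp) { (void)qp; return 0; }

int main(void)
{
	struct pkt pkts[2] = { { 0x1000, 64 }, { 0x2000, 128 } };
	struct txq_sketch txq = { fake_pending, fake_flush, NULL };

	printf("sent %u packets\n", tx_burst_sketch(&txq, pkts, 2));
	return 0;
}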
/* Sanity checks. */
assert(elts_head < rxq->elts_n);
assert(rxq->elts_head < rxq->elts_n);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
- &flags, &vlan_tci);
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
- &flags);
- (void)vlan_tci;
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
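When the fast poll reports an error (ret < 0), the code falls back to plain Verbs to find out what happened, which is what the wc/wcs_n locals above are for. A hedged sketch of that diagnostic fallback using the standard ibv_poll_cq() call; it compiles against libibverbs, but the surrounding driver plumbing is assumed:

#include <infiniband/verbs.h>
#include <inttypes.h>
#include <stdio.h>

/* Pull one plain work completion off the CQ to report its status. */
static void dump_cq_error(struct ibv_cq *cq)
{
	struct ibv_wc wc;
	int wcs_n = ibv_poll_cq(cq, 1, &wc);

	if (wcs_n == 1 && wc.status != IBV_WC_SUCCESS)
		fprintf(stderr, "WR %" PRIu64 " failed: %s\n",
			wc.wr_id, ibv_wc_status_str(wc.status));
}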
rxq->stats.ibytes += pkt_buf_len;
#endif
repost:
- ret = rxq->if_wq->recv_sg_list(rxq->wq,
- elt->sges,
- RTE_DIM(elt->sges));
+ ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges));
if (unlikely(ret)) {
/* Inability to repost WRs is fatal. */
DEBUG("%p: recv_sg_list(): failed (ret=%d)",
*/
rte_prefetch0(seg);
rte_prefetch0(&seg->cacheline1);
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- ret = rxq->if_cq->poll_length_flags_cvlan(rxq->cq, NULL, NULL,
- &flags, &vlan_tci);
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL,
- &flags);
- (void)vlan_tci;
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci);
if (unlikely(ret < 0)) {
struct ibv_wc wc;
int wcs_n;
#ifdef DEBUG_RECV
DEBUG("%p: reposting %u WRs", (void *)rxq, i);
#endif
- ret = rxq->if_wq->recv_burst(rxq->wq, sges, i);
+ ret = rxq->recv(rxq->wq, sges, i);
if (unlikely(ret)) {
/* Inability to repost WRs is fatal. */
DEBUG("%p: recv_burst(): failed (ret=%d)",
struct rte_mempool *mp; /* Memory Pool for allocations. */
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_exp_wq *wq; /* Work Queue. */
- struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
-#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
- struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
-#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
- struct ibv_exp_cq_family *if_cq; /* CQ interface. */
-#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ int32_t (*poll)(); /* Verbs poll function. */
+ int32_t (*recv)(); /* Verbs receive function. */
unsigned int port_id; /* Port ID for incoming packets. */
unsigned int elts_n; /* (*elts)[] length. */
unsigned int elts_head; /* Current index in (*elts)[]. */
struct ibv_exp_res_domain *rd; /* Resource Domain. */
struct fdir_queue fdir_queue; /* Flow director queue. */
struct ibv_mr *mr; /* Memory Region (for mp). */
+ struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */
+#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS
+ struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */
+#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
+ struct ibv_exp_cq_family *if_cq; /* CQ interface. */
+#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */
};
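The reshuffle matters as much as the new members: poll and recv land next to cq/wq at the top of struct rxq, while the setup-only if_wq/if_cq interface pointers sink to the tail, past mr, keeping the leading cache line reserved for per-packet state; struct txq below gets the same ordering. A small check one could run against such a layout, with a hypothetical trimmed struct:

#include <assert.h>
#include <stddef.h>

/* Hypothetical trimmed mirror of the reordered RX queue layout. */
struct rxq_layout {
	void *cq; /* hot: touched on every burst */
	void *wq;
	int (*poll)();
	int (*recv)();
	unsigned int port_id;
	/* ... */
	void *rd; /* cold: setup/teardown only */
	void *mr;
	void *if_wq;
	void *if_cq;
};

int main(void)
{
	/* The datapath members should fit one 64-byte cache line. */
	assert(offsetof(struct rxq_layout, recv) +
	       sizeof(int (*)()) <= 64);
	return 0;
}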
/* Hash RX queue types. */
/* TX queue descriptor. */
struct txq {
struct priv *priv; /* Back pointer to private data. */
+ int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max);
+ int (*send_pending)();
+#if MLX5_PMD_MAX_INLINE > 0
+ int (*send_pending_inline)();
+#endif
+#if MLX5_PMD_SGE_WR_N > 1
+ int (*send_pending_sg_list)();
+#endif
+ int (*send_flush)(struct ibv_qp *qp);
struct ibv_cq *cq; /* Completion Queue. */
struct ibv_qp *qp; /* Queue Pair. */
struct txq_elt (*elts)[]; /* TX elements. */
DEBUG("cleaning up %p", (void *)txq);
txq_free_elts(txq);
+ txq->poll_cnt = NULL;
+ txq->send_pending = NULL;
+#if MLX5_PMD_MAX_INLINE > 0
+ txq->send_pending_inline = NULL;
+#endif
+#if MLX5_PMD_SGE_WR_N > 1
+ txq->send_pending_sg_list = NULL;
+#endif
+ txq->send_flush = NULL;
if (txq->if_qp != NULL) {
assert(txq->priv != NULL);
assert(txq->priv->ctx != NULL);
DEBUG("%p: cleaning-up old txq just in case", (void *)txq);
txq_cleanup(txq);
*txq = tmpl;
+ txq->poll_cnt = txq->if_cq->poll_cnt;
+#if MLX5_PMD_MAX_INLINE > 0
+ txq->send_pending_inline = txq->if_qp->send_pending_inline;
+#endif
+#if MLX5_PMD_SGE_WR_N > 1
+ txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list;
+#endif
+ txq->send_pending = txq->if_qp->send_pending;
+ txq->send_flush = txq->if_qp->send_flush;
DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl);
/* Pre-register known mempools. */
rte_mempool_walk(txq_mp2mr_iter, txq);