diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index cf63434..70314b3 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
 #include "mlx5_defs.h"
 #include "mlx5_prm.h"
 
-static inline int
+static __rte_always_inline int
 check_cqe(volatile struct mlx5_cqe *cqe,
-         unsigned int cqes_n, const uint16_t ci)
-         __attribute__((always_inline));
+         unsigned int cqes_n, const uint16_t ci);
 
-static inline void
-txq_complete(struct txq *txq) __attribute__((always_inline));
+static __rte_always_inline void
+txq_complete(struct txq *txq);
 
-static inline uint32_t
-txq_mp2mr(struct txq *txq, struct rte_mempool *mp)
-       __attribute__((always_inline));
+static __rte_always_inline uint32_t
+txq_mp2mr(struct txq *txq, struct rte_mempool *mp);
 
-static inline void
-mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe)
-       __attribute__((always_inline));
+static __rte_always_inline void
+mlx5_tx_dbrec(struct txq *txq, volatile struct mlx5_wqe *wqe);
 
-static inline uint32_t
-rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe)
-       __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_pkt_type(volatile struct mlx5_cqe *cqe);
 
-static inline int
+static __rte_always_inline int
 mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe *cqe,
-                uint16_t cqe_cnt, uint32_t *rss_hash)
-                __attribute__((always_inline));
+                uint16_t cqe_cnt, uint32_t *rss_hash);
 
-static inline uint32_t
-rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe)
-                  __attribute__((always_inline));
+static __rte_always_inline uint32_t
+rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe *cqe);
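
The hunk above replaces the hand-written __attribute__((always_inline)) on each
forward declaration with DPDK's __rte_always_inline macro. A minimal before/after
sketch, assuming the macro definition from rte_common.h (introduced in DPDK 17.05):

/* From rte_common.h: one macro carries the inline keyword and the
 * GCC attribute, so neither has to be repeated per prototype. */
#define __rte_always_inline inline __attribute__((always_inline))

struct mlx5_cqe;	/* illustration only */

/* Before: the attribute is appended to every forward declaration by hand. */
static inline int
check_cqe(volatile struct mlx5_cqe *cqe, unsigned int cqes_n, const uint16_t ci)
	__attribute__((always_inline));

/* After: the macro expands to both pieces. */
static __rte_always_inline int
check_cqe(volatile struct mlx5_cqe *cqe, unsigned int cqes_n, const uint16_t ci);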
 
 #ifndef NDEBUG
 
@@ -533,12 +527,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                uint16_t ehdr;
                uint8_t cs_flags = 0;
                uint64_t tso = 0;
+               uint16_t tso_segsz = 0;
 #ifdef MLX5_PMD_SOFT_COUNTERS
                uint32_t total_length = 0;
 #endif
 
                /* first_seg */
-               buf = *(pkts++);
+               buf = *pkts;
                segs_n = buf->nb_segs;
                /*
                 * Make sure there is enough room to store this packet and
@@ -549,15 +544,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        break;
                max -= segs_n;
                --segs_n;
-               if (!segs_n)
-                       --pkts_n;
                if (unlikely(--max_wqe == 0))
                        break;
                wqe = (volatile struct mlx5_wqe_v *)
                        tx_mlx5_wqe(txq, txq->wqe_ci);
                rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-               if (pkts_n > 1)
-                       rte_prefetch0(*pkts);
+               if (pkts_n - i > 1)
+                       rte_prefetch0(*(pkts + 1));
                addr = rte_pktmbuf_mtod(buf, uintptr_t);
                length = DATA_LEN(buf);
                ehdr = (((uint8_t *)addr)[1] << 8) |
@@ -569,14 +562,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                        break;
                /* Update element. */
                (*txq->elts)[elts_head] = buf;
-               elts_head = (elts_head + 1) & (elts_n - 1);
                /* Prefetch next buffer data. */
-               if (pkts_n > 1) {
-                       volatile void *pkt_addr;
-
-                       pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
-                       rte_prefetch0(pkt_addr);
-               }
+               if (pkts_n - i > 1)
+                       rte_prefetch0(
+                           rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
                /* Should we enable HW CKSUM offload */
                if (buf->ol_flags &
                    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
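
The reworked prefetch conditions in the two hunks above follow from the new loop
shape: pkts is no longer advanced when a packet is picked up, so the lookahead
target is *(pkts + 1) and "at least one more packet remains" becomes
pkts_n - i > 1. A condensed sketch (helper name hypothetical):

#include <rte_mbuf.h>
#include <rte_prefetch.h>

/* Sketch: pkts stays on the packet currently being built, so the
 * lookahead target is pkts[1]. */
static inline void
prefetch_next_pkt(struct rte_mbuf **pkts, uint16_t pkts_n, uint16_t i)
{
	if (pkts_n - i > 1) {
		rte_prefetch0(*(pkts + 1));	/* mbuf header */
		rte_prefetch0(rte_pktmbuf_mtod(*(pkts + 1),
					       volatile void *)); /* payload */
	}
}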
@@ -634,6 +623,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 
                                tso_header_sz = buf->l2_len + vlan_sz +
                                                buf->l3_len + buf->l4_len;
+                               tso_segsz = buf->tso_segsz;
 
                                if (is_tunneled && txq->tunnel_en) {
                                        tso_header_sz += buf->outer_l2_len +
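
tso_segsz is snapshotted here while buf still points at the packet's first
segment; the segment loop later advances buf along the chain, so reading
buf->tso_segsz at WQE-fill time, as the old code did (see the next_pkt hunk
below), could pick up a segment that never had the MSS set. A hypothetical
sketch of the hazard:

#include <rte_byteorder.h>
#include <rte_mbuf.h>

/* Why the MSS must be captured before the segment walk (sketch only). */
static uint16_t
tso_mss_for_wqe(struct rte_mbuf *buf)
{
	uint16_t tso_segsz = buf->tso_segsz;	/* seg 1 carries the MSS */

	while (buf->next != NULL)
		buf = buf->next;		/* mirrors the next_seg walk */
	/* buf now points at the last segment; buf->tso_segsz may be 0.
	 * The snapshot is what must reach wqe->eseg. */
	return rte_cpu_to_be_16(tso_segsz);
}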
@@ -679,10 +669,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
                                        };
                                        ds = 1;
                                        total_length = 0;
-                                       pkts--;
-                                       pkts_n++;
-                                       elts_head = (elts_head - 1) &
-                                                   (elts_n - 1);
                                        k++;
                                        goto next_wqe;
                                }
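
The deleted pkts--/pkts_n++/elts_head rewind was only needed because the old
loop consumed its inputs before knowing the packet would fit. With pkts,
elts_head, and i advanced at next_pkt: on success, error paths can bail out
with nothing to undo; this is also why the loop below now terminates on
(i < pkts_n) instead of a decremented pkts_n. A sketch of the new shape, with
a hypothetical post_packet():

#include <rte_mbuf.h>

int post_packet(struct rte_mbuf *buf);	/* hypothetical helper */

/* Consume-on-success loop shape (sketch). */
static uint16_t
tx_loop_sketch(struct rte_mbuf **pkts, uint16_t pkts_n)
{
	uint16_t i = 0;

	if (pkts_n == 0)
		return 0;
	do {
		struct rte_mbuf *buf = *pkts;	/* peek; nothing consumed */

		if (post_packet(buf) < 0)
			break;			/* no rollback needed */
		++pkts;				/* next_pkt: consume on success */
		++i;
	} while (i < pkts_n);
	return i;				/* packets actually posted */
}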
@@ -815,17 +801,17 @@ next_seg:
                        naddr,
                        naddr >> 32,
                };
-               (*txq->elts)[elts_head] = buf;
                elts_head = (elts_head + 1) & (elts_n - 1);
+               (*txq->elts)[elts_head] = buf;
                ++sg;
                /* Advance counter only if all segs are successfully posted. */
-               if (sg < segs_n) {
+               if (sg < segs_n)
                        goto next_seg;
-               } else {
-                       --pkts_n;
+               else
                        j += sg;
-               }
 next_pkt:
+               elts_head = (elts_head + 1) & (elts_n - 1);
+               ++pkts;
                ++i;
                /* Initialize known and common part of the WQE structure. */
                if (tso) {
@@ -837,7 +823,7 @@ next_pkt:
                        };
                        wqe->eseg = (rte_v128u32_t){
                                0,
-                               cs_flags | (htons(buf->tso_segsz) << 16),
+                               cs_flags | (htons(tso_segsz) << 16),
                                0,
                                (ehdr << 16) | htons(tso_header_sz),
                        };
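
The moves around next_seg:/next_pkt: preserve one invariant: elts_head stays on
the slot of the most recently stored segment and advances exactly once per
stored mbuf, with the per-packet advance deferred to next_pkt:. A sketch with
hypothetical names and signature:

#include <rte_mbuf.h>

/* elts ring bookkeeping after the rework (sketch).  elts_n is a power
 * of two, so the wrap is a mask rather than a modulo. */
static uint16_t
store_pkt_segs(struct rte_mbuf **elts, uint16_t elts_n,
	       uint16_t elts_head, struct rte_mbuf *buf)
{
	elts[elts_head] = buf;				/* first segment */
	for (buf = buf->next; buf != NULL; buf = buf->next) {
		elts_head = (elts_head + 1) & (elts_n - 1);
		elts[elts_head] = buf;			/* extra segments */
	}
	return (elts_head + 1) & (elts_n - 1);		/* next_pkt: free slot */
}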
@@ -863,7 +849,7 @@ next_wqe:
                /* Increment sent bytes counter. */
                txq->stats.obytes += total_length;
 #endif
-       } while (pkts_n);
+       } while (i < pkts_n);
        /* Take a shortcut if nothing must be sent. */
        if (unlikely((i + k) == 0))
                return 0;
@@ -2064,9 +2050,7 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
                DATA_LEN(rep) = DATA_LEN(seg);
                PKT_LEN(rep) = PKT_LEN(seg);
                SET_DATA_OFF(rep, DATA_OFF(seg));
-               NB_SEGS(rep) = NB_SEGS(seg);
                PORT(rep) = PORT(seg);
-               NEXT(rep) = NULL;
                (*rxq->elts)[idx] = rep;
                /*
                 * Fill NIC descriptor with the new buffer.  The lkey and size
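
Dropping NB_SEGS(rep) = NB_SEGS(seg) and NEXT(rep) = NULL appears to rely on
the mbuf free-path contract: segments returned to the pool already have
next == NULL and nb_segs == 1 (rte_pktmbuf_prefree_seg() restores them), so a
raw-allocated replacement needs no re-initialization. A sketch of that
assumption:

#include <rte_debug.h>
#include <rte_mbuf.h>

/* Contract the deletions lean on (sketch): mbufs come back from the
 * pool pre-reset, so only the fields this PMD changes need storing. */
static struct rte_mbuf *
replenish_slot(struct rte_mempool *mp)
{
	struct rte_mbuf *rep = rte_mbuf_raw_alloc(mp);

	if (rep != NULL) {
		RTE_ASSERT(rep->next == NULL);
		RTE_ASSERT(rep->nb_segs == 1);
	}
	return rep;
}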
@@ -2160,76 +2144,3 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
        (void)pkts_n;
        return 0;
 }
-
-/**
- * DPDK callback for rx queue interrupt enable.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param rx_queue_id
- *   RX queue number
- *
- * @return
- *   0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#ifdef HAVE_UPDATE_CQ_CI
-       struct priv *priv = mlx5_get_priv(dev);
-       struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-       struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
-       struct ibv_cq *cq = rxq_ctrl->cq;
-       uint16_t ci = rxq->cq_ci;
-       int ret = 0;
-
-       ibv_mlx5_exp_update_cq_ci(cq, ci);
-       ret = ibv_req_notify_cq(cq, 0);
-#else
-       int ret = -1;
-       (void)dev;
-       (void)rx_queue_id;
-#endif
-       if (ret)
-               WARN("unable to arm interrupt on rx queue %d", rx_queue_id);
-       return ret;
-}
-
-/**
- * DPDK callback for rx queue interrupt disable.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param rx_queue_id
- *   RX queue number
- *
- * @return
- *   0 on success, negative on failure.
- */
-int
-mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
-{
-#ifdef HAVE_UPDATE_CQ_CI
-       struct priv *priv = mlx5_get_priv(dev);
-       struct rxq *rxq = (*priv->rxqs)[rx_queue_id];
-       struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq);
-       struct ibv_cq *cq = rxq_ctrl->cq;
-       struct ibv_cq *ev_cq;
-       void *ev_ctx;
-       int ret = 0;
-
-       ret = ibv_get_cq_event(cq->channel, &ev_cq, &ev_ctx);
-       if (ret || ev_cq != cq)
-               ret = -1;
-       else
-               ibv_ack_cq_events(cq, 1);
-#else
-       int ret = -1;
-       (void)dev;
-       (void)rx_queue_id;
-#endif
-       if (ret)
-               WARN("unable to disable interrupt on rx queue %d",
-                    rx_queue_id);
-       return ret;
-}
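
The two removed callbacks backed rte_eth_dev_rx_intr_enable() and
rte_eth_dev_rx_intr_disable() for this PMD; the hunk only shows them leaving
mlx5_rxtx.c, not where the series reimplements them. The application-facing
pattern is unchanged; a minimal interrupt-driven Rx sketch, error handling
elided:

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Arm the queue interrupt, block until the NIC signals it (e.g. via
 * rte_epoll_wait() on the queue's event fd), then disarm and drain. */
static void
rx_wait_and_drain(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb, n;

	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* ... sleep until the queue's event fd fires ... */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	do {
		nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
		for (n = 0; n < nb; n++)
			rte_pktmbuf_free(pkts[n]);
	} while (nb > 0);
}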