net/mlx5: move static asserts to global scope
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 402e7d1..65a1f99 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -79,6 +79,56 @@ static uint16_t mlx5_tx_burst_##func(void *txq, \
 
 #define MLX5_TXOFF_INFO(func, olx) {mlx5_tx_burst_##func, olx},
 
+/* static asserts */
+static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
+static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+               (sizeof(uint16_t) +
+                sizeof(rte_v128u32_t)),
+               "invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+               (sizeof(uint16_t) +
+                sizeof(struct rte_vlan_hdr) +
+                2 * RTE_ETHER_ADDR_LEN),
+               "invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+               (sizeof(uint16_t) +
+                sizeof(rte_v128u32_t)),
+               "invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+               (sizeof(uint16_t) +
+                sizeof(struct rte_vlan_hdr) +
+                2 * RTE_ETHER_ADDR_LEN),
+               "invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+               (sizeof(uint16_t) +
+                sizeof(rte_v128u32_t)),
+               "invalid Ethernet Segment data size");
+static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
+               (sizeof(uint16_t) +
+                sizeof(struct rte_vlan_hdr) +
+                2 * RTE_ETHER_ADDR_LEN),
+               "invalid Ethernet Segment data size");
+static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
+               (2 * RTE_ETHER_ADDR_LEN),
+               "invalid Data Segment data size");
+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
+static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
+static_assert((sizeof(struct rte_vlan_hdr) +
+                       sizeof(struct rte_ether_hdr)) ==
+               MLX5_ESEG_MIN_INLINE_SIZE,
+               "invalid min inline data size");
+static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
+               MLX5_DSEG_MAX, "invalid WQE max size");
+static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
+               "invalid WQE Control Segment size");
+static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
+               "invalid WQE Ethernet Segment size");
+static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
+               "invalid WQE Data Segment size");
+static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
+               "invalid WQE size");
+
 static __rte_always_inline uint32_t
 rxq_cq_to_pkt_type(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                   volatile struct mlx5_mini_cqe8 *mcqe);
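
Gathering the compile-time checks here relies on C11 file-scope static_assert (<assert.h> maps it to _Static_assert), so each invariant is verified once per translation unit instead of being re-declared inside every Tx routine that uses it, and a violation is reported even for configurations whose affected routines are never selected. A minimal standalone illustration of the pattern, using a hypothetical segment layout rather than the driver's definitions:

#include <assert.h>	/* C11: static_assert -> _Static_assert */
#include <stdint.h>

/* Hypothetical 16-byte data segment, not the mlx5 layout. */
struct example_dseg {
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

/* Evaluated at compile time; no object code is generated. */
static_assert(sizeof(struct example_dseg) == 16,
	      "unexpected data segment size");
static_assert(2 * 6 /* MAC addresses */ + sizeof(uint16_t) == 14,
	      "Ethernet header (two MACs plus ethertype) is 14 bytes");
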
@@ -462,11 +512,21 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
 {
        struct rxq_zip *zip = &rxq->zip;
        volatile struct mlx5_cqe *cqe;
-       unsigned int cq_ci = rxq->cq_ci;
        const unsigned int cqe_n = (1 << rxq->cqe_n);
+       const unsigned int sges_n = (1 << rxq->sges_n);
+       const unsigned int elts_n = (1 << rxq->elts_n);
+       const unsigned int strd_n = (1 << rxq->strd_num_n);
        const unsigned int cqe_cnt = cqe_n - 1;
-       unsigned int used = 0;
+       unsigned int cq_ci, used;
 
+       /* if we are processing a compressed cqe */
+       if (zip->ai) {
+               used = zip->cqe_cnt - zip->ai;
+               cq_ci = zip->cq_ci;
+       } else {
+               used = 0;
+               cq_ci = rxq->cq_ci;
+       }
        cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
        while (check_cqe(cqe, cqe_n, cq_ci) != MLX5_CQE_STATUS_HW_OWN) {
                int8_t op_own;
@@ -474,17 +534,14 @@ rx_queue_count(struct mlx5_rxq_data *rxq)
 
                op_own = cqe->op_own;
                if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED)
-                       if (unlikely(zip->ai))
-                               n = zip->cqe_cnt - zip->ai;
-                       else
-                               n = rte_be_to_cpu_32(cqe->byte_cnt);
+                       n = rte_be_to_cpu_32(cqe->byte_cnt);
                else
                        n = 1;
                cq_ci += n;
                used += n;
                cqe = &(*rxq->cqes)[cq_ci & cqe_cnt];
        }
-       used = RTE_MIN(used, cqe_n);
+       used = RTE_MIN(used * sges_n, elts_n * strd_n);
        return used;
 }
 
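The reworked rx_queue_count() resumes from the open compressed-CQE session (zip->ai, zip->cq_ci) instead of starting blindly at rxq->cq_ci, scales the result by the SGEs each packet consumes, and clamps it to the real ring capacity in descriptors (elements * strides) rather than to the CQ size. A simplified standalone sketch of that arithmetic, with the CQE walk collapsed into a ready-CQE count and purely illustrative names and values:

#include <stdio.h>

static unsigned int
rx_used_desc(unsigned int ready_cqes,  /* SW-owned CQEs found by the walk */
	     unsigned int zip_ai,      /* next mini-CQE index, 0 = no session */
	     unsigned int zip_cqe_cnt, /* mini-CQEs in the open session */
	     unsigned int sges_n,      /* SGEs consumed per packet */
	     unsigned int elts_n,      /* ring elements */
	     unsigned int strd_n)      /* strides per element (MPRQ) */
{
	/* Packets still pending inside the open compressed session. */
	unsigned int used = zip_ai ? zip_cqe_cnt - zip_ai : 0;

	used += ready_cqes;
	/* Scale by SGEs per packet and clamp to the ring capacity. */
	used *= sges_n;
	return used < elts_n * strd_n ? used : elts_n * strd_n;
}

int main(void)
{
	/* 3 packets left in a zip session plus 10 plain CQEs, 1 SGE each. */
	printf("%u\n", rx_used_desc(10, 5, 8, 1, 256, 1)); /* prints 13 */
	return 0;
}
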
@@ -548,7 +605,7 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
 
        if (!rxq)
                return;
-       qinfo->mp = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+       qinfo->mp = mlx5_rxq_mprq_enabled(rxq) ?
                                        rxq->mprq_mp : rxq->mp;
        qinfo->conf.rx_thresh.pthresh = 0;
        qinfo->conf.rx_thresh.hthresh = 0;
@@ -558,7 +615,9 @@ mlx5_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        qinfo->conf.rx_deferred_start = rxq_ctrl ? 0 : 1;
        qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
        qinfo->scattered_rx = dev->data->scattered_rx;
-       qinfo->nb_desc = 1 << rxq->elts_n;
+       qinfo->nb_desc = mlx5_rxq_mprq_enabled(rxq) ?
+               (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
+               (1 << rxq->elts_n);
 }
 
 /**
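
With Multi-Packet RQ every ring element carries 2^strd_num_n strides and each stride can receive a packet, so the queue reports elements * strides descriptors rather than just elements. A small sketch of that computation (illustrative helper, not a driver API):

#include <stdint.h>

static inline uint32_t
rxq_nb_desc(uint8_t elts_n, uint8_t strd_num_n, int mprq_enabled)
{
	uint32_t elts = 1u << elts_n;

	return mprq_enabled ? elts * (1u << strd_num_n) : elts;
}

For example, 16 elements (elts_n = 4) holding 32 strides each (strd_num_n = 5) report 512 descriptors when MPRQ is enabled.
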
@@ -1181,6 +1240,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                } else {
                        int ret;
                        int8_t op_own;
+                       uint32_t cq_ci;
 
                        ret = check_cqe(cqe, cqe_n, rxq->cq_ci);
                        if (unlikely(ret != MLX5_CQE_STATUS_SW_OWN)) {
@@ -1194,14 +1254,19 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                        return 0;
                                }
                        }
-                       ++rxq->cq_ci;
+                       /*
+                        * Introduce the local variable to have queue cq_ci
+                        * index in queue structure always consistent with
+                        * actual CQE boundary (not pointing to the middle
+                        * of compressed CQE session).
+                        */
+                       cq_ci = rxq->cq_ci + 1;
                        op_own = cqe->op_own;
                        if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) {
                                volatile struct mlx5_mini_cqe8 (*mc)[8] =
                                        (volatile struct mlx5_mini_cqe8 (*)[8])
                                        (uintptr_t)(&(*rxq->cqes)
-                                               [rxq->cq_ci &
-                                                cqe_cnt].pkt_info);
+                                               [cq_ci & cqe_cnt].pkt_info);
 
                                /* Fix endianness. */
                                zip->cqe_cnt = rte_be_to_cpu_32(cqe->byte_cnt);
@@ -1214,10 +1279,9 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                 * 7 CQEs after the initial CQE instead of 8
                                 * for subsequent ones.
                                 */
-                               zip->ca = rxq->cq_ci;
+                               zip->ca = cq_ci;
                                zip->na = zip->ca + 7;
                                /* Compute the next non compressed CQE. */
-                               --rxq->cq_ci;
                                zip->cq_ci = rxq->cq_ci + zip->cqe_cnt;
                                /* Get packet size to return. */
                                len = rte_be_to_cpu_32((*mc)[0].byte_cnt &
@@ -1233,6 +1297,7 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe,
                                        ++idx;
                                }
                        } else {
+                               rxq->cq_ci = cq_ci;
                                len = rte_be_to_cpu_32(cqe->byte_cnt);
                        }
                }
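
The local cq_ci means rxq->cq_ci is only committed once a whole CQE has been consumed: a plain CQE advances it immediately, while a compressed (title) CQE leaves it on the CQE boundary and records the end of the session in zip->cq_ci, which is what allows the reworked rx_queue_count() above to trust the queue index. A heavily simplified sketch of that commit rule (hypothetical types; mini-CQE consumption and session completion are omitted):

#include <stdint.h>

struct zip_state {
	uint32_t cq_ci;   /* CQE index right after the compressed session */
	uint32_t cqe_cnt; /* mini-CQEs carried by the session */
	uint32_t ai;      /* next mini-CQE to consume, 0 = no open session */
};

static inline void
consume_cqe(uint32_t *rxq_cq_ci, struct zip_state *zip,
	    int compressed, uint32_t cqe_cnt)
{
	uint32_t cq_ci = *rxq_cq_ci + 1; /* tentative advance */

	if (compressed) {
		/* Open a session; the queue index stays on the title CQE. */
		zip->cqe_cnt = cqe_cnt;
		zip->ai = 1;
		zip->cq_ci = *rxq_cq_ci + cqe_cnt;
	} else {
		/* Plain CQE: commit the advance right away. */
		*rxq_cq_ci = cq_ci;
	}
}
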
@@ -2055,8 +2120,6 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
        bool ring_doorbell = false;
        int ret;
 
-       static_assert(MLX5_CQE_STATUS_HW_OWN < 0, "Must be negative value");
-       static_assert(MLX5_CQE_STATUS_SW_OWN < 0, "Must be negative value");
        do {
                volatile struct mlx5_cqe *cqe;
 
@@ -2098,8 +2161,10 @@ mlx5_tx_handle_completion(struct mlx5_txq_data *__rte_restrict txq,
                }
                /* Normal transmit completion. */
                MLX5_ASSERT(txq->cq_ci != txq->cq_pi);
+#ifdef RTE_LIBRTE_MLX5_DEBUG
                MLX5_ASSERT((txq->fcqs[txq->cq_ci & txq->cqe_m] >> 16) ==
                            cqe->wqe_counter);
+#endif
                ring_doorbell = true;
                ++txq->cq_ci;
                last_cqe = cqe;
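
Judging by the shift, the stored completion entry carries the expected WQE counter in its upper 16 bits only when the debug bookkeeping is compiled in, so the extra #ifdef keeps the comparison out of builds where MLX5_ASSERT could still expand (for instance with RTE_ENABLE_ASSERT) but the wider entry does not exist. A sketch of that general pattern, debug-only extended bookkeeping behind one macro, with hypothetical names rather than the Tx queue's real fields:

#include <stdint.h>

#ifdef RTE_LIBRTE_MLX5_DEBUG
typedef uint32_t fcq_entry_t; /* low 16 bits: head, high 16 bits: WQE counter */
#else
typedef uint16_t fcq_entry_t; /* head only in release builds */
#endif

static inline uint16_t
fcq_head(fcq_entry_t entry)
{
	return (uint16_t)entry;
}

#ifdef RTE_LIBRTE_MLX5_DEBUG
/* Only meaningful when the extended entry is compiled in. */
static inline uint16_t
fcq_wqe_counter(fcq_entry_t entry)
{
	return (uint16_t)(entry >> 16);
}
#endif
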
@@ -2364,15 +2429,6 @@ mlx5_tx_eseg_dmin(struct mlx5_txq_data *__rte_restrict txq __rte_unused,
        es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
                       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
                       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
-       static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-                               (sizeof(uint16_t) +
-                                sizeof(rte_v128u32_t)),
-                     "invalid Ethernet Segment data size");
-       static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-                               (sizeof(uint16_t) +
-                                sizeof(struct rte_vlan_hdr) +
-                                2 * RTE_ETHER_ADDR_LEN),
-                     "invalid Ethernet Segment data size");
        psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
        es->inline_hdr_sz = RTE_BE16(MLX5_ESEG_MIN_INLINE_SIZE);
        es->inline_data = *(unaligned_uint16_t *)psrc;
@@ -2457,15 +2513,6 @@ mlx5_tx_eseg_data(struct mlx5_txq_data *__rte_restrict txq,
        es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
                       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
                       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
-       static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-                               (sizeof(uint16_t) +
-                                sizeof(rte_v128u32_t)),
-                     "invalid Ethernet Segment data size");
-       static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-                               (sizeof(uint16_t) +
-                                sizeof(struct rte_vlan_hdr) +
-                                2 * RTE_ETHER_ADDR_LEN),
-                     "invalid Ethernet Segment data size");
        psrc = rte_pktmbuf_mtod(loc->mbuf, uint8_t *);
        es->inline_hdr_sz = rte_cpu_to_be_16(inlen);
        es->inline_data = *(unaligned_uint16_t *)psrc;
@@ -2680,15 +2727,6 @@ mlx5_tx_eseg_mdat(struct mlx5_txq_data *__rte_restrict txq,
        es->metadata = MLX5_TXOFF_CONFIG(METADATA) ?
                       loc->mbuf->ol_flags & PKT_TX_DYNF_METADATA ?
                       *RTE_FLOW_DYNF_METADATA(loc->mbuf) : 0 : 0;
-       static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-                               (sizeof(uint16_t) +
-                                sizeof(rte_v128u32_t)),
-                     "invalid Ethernet Segment data size");
-       static_assert(MLX5_ESEG_MIN_INLINE_SIZE ==
-                               (sizeof(uint16_t) +
-                                sizeof(struct rte_vlan_hdr) +
-                                2 * RTE_ETHER_ADDR_LEN),
-                     "invalid Ethernet Segment data size");
        MLX5_ASSERT(inlen >= MLX5_ESEG_MIN_INLINE_SIZE);
        pdst = (uint8_t *)&es->inline_data;
        if (MLX5_TXOFF_CONFIG(VLAN) && vlan) {
@@ -2935,9 +2973,6 @@ mlx5_tx_dseg_vlan(struct mlx5_txq_data *__rte_restrict txq,
        uint8_t *pdst;
 
        MLX5_ASSERT(len > MLX5_ESEG_MIN_INLINE_SIZE);
-       static_assert(MLX5_DSEG_MIN_INLINE_SIZE ==
-                                (2 * RTE_ETHER_ADDR_LEN),
-                     "invalid Data Segment data size");
        if (!MLX5_TXOFF_CONFIG(MPW)) {
                /* Store the descriptor byte counter for eMPW sessions. */
                dseg->bcount = rte_cpu_to_be_32
@@ -4053,7 +4088,6 @@ mlx5_tx_burst_empw_simple(struct mlx5_txq_data *__rte_restrict txq,
        MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
        MLX5_ASSERT(loc->elts_free && loc->wqe_free);
        MLX5_ASSERT(pkts_n > loc->pkts_sent);
-       static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
        for (;;) {
@@ -4230,7 +4264,6 @@ mlx5_tx_burst_empw_inline(struct mlx5_txq_data *__rte_restrict txq,
        MLX5_ASSERT(MLX5_TXOFF_CONFIG(EMPW));
        MLX5_ASSERT(loc->elts_free && loc->wqe_free);
        MLX5_ASSERT(pkts_n > loc->pkts_sent);
-       static_assert(MLX5_EMPW_MIN_PACKETS >= 2, "invalid min size");
        pkts += loc->pkts_sent + 1;
        pkts_n -= loc->pkts_sent;
        for (;;) {
@@ -4544,10 +4577,6 @@ mlx5_tx_burst_single_send(struct mlx5_txq_data *__rte_restrict txq,
                            loc->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
                                vlan = sizeof(struct rte_vlan_hdr);
                                inlen += vlan;
-                               static_assert((sizeof(struct rte_vlan_hdr) +
-                                              sizeof(struct rte_ether_hdr)) ==
-                                              MLX5_ESEG_MIN_INLINE_SIZE,
-                                              "invalid min inline data size");
                        }
                        /*
                         * If inlining is enabled at configuration time
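
The assert removed here documents the arithmetic behind "inlen += vlan": a 14-byte Ethernet header plus a 4-byte VLAN tag is exactly the 18-byte minimal Ethernet Segment inline size that the file-scope asserts added above already pin down, so growing inlen by sizeof(struct rte_vlan_hdr) keeps the inline data on that boundary. The same relation, spelled out as standalone checks against the DPDK header definitions:

#include <assert.h>
#include <rte_ether.h> /* struct rte_ether_hdr, struct rte_vlan_hdr */

static_assert(sizeof(struct rte_ether_hdr) == 14, "Ethernet header size");
static_assert(sizeof(struct rte_vlan_hdr) == 4, "VLAN header size");
static_assert(sizeof(struct rte_ether_hdr) + sizeof(struct rte_vlan_hdr) == 18,
	      "minimal inline Ethernet Segment data with a VLAN tag");
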
@@ -5550,16 +5579,6 @@ mlx5_select_tx_function(struct rte_eth_dev *dev)
        uint64_t tx_offloads = dev->data->dev_conf.txmode.offloads;
        unsigned int diff = 0, olx = 0, i, m;
 
-       static_assert(MLX5_WQE_SIZE_MAX / MLX5_WSEG_SIZE <=
-                     MLX5_DSEG_MAX, "invalid WQE max size");
-       static_assert(MLX5_WQE_CSEG_SIZE == MLX5_WSEG_SIZE,
-                     "invalid WQE Control Segment size");
-       static_assert(MLX5_WQE_ESEG_SIZE == MLX5_WSEG_SIZE,
-                     "invalid WQE Ethernet Segment size");
-       static_assert(MLX5_WQE_DSEG_SIZE == MLX5_WSEG_SIZE,
-                     "invalid WQE Data Segment size");
-       static_assert(MLX5_WQE_SIZE == 4 * MLX5_WSEG_SIZE,
-                     "invalid WQE size");
        MLX5_ASSERT(priv);
        if (tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
                /* We should support Multi-Segment Packets. */