mbuf: add namespace to offload flags
diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 4c067d8..aa36df2 100644
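
This is the mlx5 NEON Rx path's share of the tree-wide rename that moves the
public mbuf offload flags into the RTE_MBUF_F_ namespace (PKT_RX_FDIR becomes
RTE_MBUF_F_RX_FDIR, and so on). Most hunks below are that mechanical
one-to-one substitution; the last two also carry functional changes to the
CQE decompression mask handling and to the flow-metadata extraction. For
out-of-tree consumers, the transition can be bridged with aliases along these
lines (a sketch; the deprecated alias set actually shipped in
rte_mbuf_core.h may differ):

    /* Hedged sketch of backward-compatibility aliases. */
    #define PKT_RX_VLAN           RTE_MBUF_F_RX_VLAN
    #define PKT_RX_VLAN_STRIPPED  RTE_MBUF_F_RX_VLAN_STRIPPED
    #define PKT_RX_RSS_HASH       RTE_MBUF_F_RX_RSS_HASH
    #define PKT_RX_FDIR           RTE_MBUF_F_RX_FDIR
    #define PKT_RX_FDIR_ID        RTE_MBUF_F_RX_FDIR_ID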
@@ -220,12 +220,12 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                                const uint32x4_t ft_mask =
                                        vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
                                const uint32x4_t fdir_flags =
-                                       vdupq_n_u32(PKT_RX_FDIR);
+                                       vdupq_n_u32(RTE_MBUF_F_RX_FDIR);
                                const uint32x4_t fdir_all_flags =
-                                       vdupq_n_u32(PKT_RX_FDIR |
-                                                   PKT_RX_FDIR_ID);
+                                       vdupq_n_u32(RTE_MBUF_F_RX_FDIR |
+                                                   RTE_MBUF_F_RX_FDIR_ID);
                                uint32x4_t fdir_id_flags =
-                                       vdupq_n_u32(PKT_RX_FDIR_ID);
+                                       vdupq_n_u32(RTE_MBUF_F_RX_FDIR_ID);
                                uint32x4_t invalid_mask, ftag;
 
                                __asm__ volatile
@@ -240,7 +240,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                                invalid_mask = vceqzq_u32(ftag);
                                ol_flags_mask = vorrq_u32(ol_flags_mask,
                                                          fdir_all_flags);
-                               /* Set PKT_RX_FDIR if flow tag is non-zero. */
+                               /* Set RTE_MBUF_F_RX_FDIR if flow tag is non-zero. */
                                ol_flags = vorrq_u32(ol_flags,
                                        vbicq_u32(fdir_flags, invalid_mask));
                                /* Mask out invalid entries. */
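
In scalar terms, the FDIR marking built in the two hunks above reduces to the
following per packet (a sketch; flow_tag and ol_flags stand for one lane of
ftag and ol_flags, and the FDIR_ID/default-mark handling continues in elided
code):

    if (flow_tag != 0) {
            ol_flags |= RTE_MBUF_F_RX_FDIR;
            /* A real mark, i.e. not MLX5_FLOW_MARK_DEFAULT, has an ID. */
            if (flow_tag != MLX5_FLOW_MARK_DEFAULT)
                    ol_flags |= RTE_MBUF_F_RX_FDIR_ID;
    }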
@@ -276,8 +276,8 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                                const uint8_t pkt_hdr3 =
                                        mcq[pos % 8 + 3].hdr_type;
                                const uint32x4_t vlan_mask =
-                                       vdupq_n_u32(PKT_RX_VLAN |
-                                                   PKT_RX_VLAN_STRIPPED);
+                                       vdupq_n_u32(RTE_MBUF_F_RX_VLAN |
+                                                   RTE_MBUF_F_RX_VLAN_STRIPPED);
                                const uint32x4_t cv_mask =
                                        vdupq_n_u32(MLX5_CQE_VLAN_STRIPPED);
                                const uint32x4_t pkt_cv = {
@@ -317,7 +317,7 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                                }
                        }
                        const uint32x4_t hash_flags =
-                               vdupq_n_u32(PKT_RX_RSS_HASH);
+                               vdupq_n_u32(RTE_MBUF_F_RX_RSS_HASH);
                        const uint32x4_t rearm_flags =
                                vdupq_n_u32((uint32_t)t_pkt->ol_flags);
 
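
For context, rxq_cq_decompress_v() is expanding a compressed CQE session
here: rearm_flags broadcasts the title packet's ol_flags so every
decompressed mbuf starts from the same flag set, and hash_flags adds
RTE_MBUF_F_RX_RSS_HASH on top. A rough scalar sketch of the merge
(lane_set/lane_clear are hypothetical stand-ins for the per-lane FDIR/VLAN
masks built above):

    for (i = 0; i < n; ++i)
            elts[i]->ol_flags =
                    ((uint32_t)t_pkt->ol_flags & ~lane_clear[i]) |
                    RTE_MBUF_F_RX_RSS_HASH | lane_set[i];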
@@ -396,22 +396,22 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
        uint16x4_t ptype;
        uint32x4_t pinfo, cv_flags;
        uint32x4_t ol_flags =
-               vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
+               vdupq_n_u32(rxq->rss_hash * RTE_MBUF_F_RX_RSS_HASH |
                            rxq->hw_timestamp * rxq->timestamp_rx_flag);
        const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
        const uint8x16_t cv_flag_sel = {
                0,
-               (uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
-               (uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
+               (uint8_t)(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED),
+               (uint8_t)(RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1),
                0,
-               (uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
+               (uint8_t)(RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1),
                0,
-               (uint8_t)((PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1),
+               (uint8_t)((RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1),
                0, 0, 0, 0, 0, 0, 0, 0, 0
        };
        const uint32x4_t cv_mask =
-               vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
-                           PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+               vdupq_n_u32(RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+                           RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
        const uint64x2_t mbuf_init = vld1q_u64
                                ((const uint64_t *)&rxq->mbuf_initializer);
        uint64x2_t rearm0, rearm1, rearm2, rearm3;
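
The cv_flag_sel table above feeds a NEON byte-table lookup (vqtbl1q_u8) that
translates the CQE's VLAN and checksum completion bits directly into mbuf
flag bytes. RTE_MBUF_F_RX_L4_CKSUM_GOOD is bit 8 and does not fit in a byte,
which is why the checksum entries are stored pre-shifted right by one and
restored with a shift after the lookup. A scalar analogue of the net mapping
(index derivation simplified; the helper name is hypothetical):

    static inline uint32_t
    cv_to_ol_flags(int l3_ok, int l4_ok, int vlan_stripped)
    {
            uint32_t cv = 0;

            if (l3_ok)
                    cv |= RTE_MBUF_F_RX_IP_CKSUM_GOOD >> 1;
            if (l4_ok)
                    cv |= RTE_MBUF_F_RX_L4_CKSUM_GOOD >> 1;
            cv <<= 1; /* restore the table's pre-shift */
            if (vlan_stripped)
                    cv |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
            return cv;
    }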
@@ -419,11 +419,11 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 
        if (rxq->mark) {
                const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
-               const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
-               uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+               const uint32x4_t fdir_flags = vdupq_n_u32(RTE_MBUF_F_RX_FDIR);
+               uint32x4_t fdir_id_flags = vdupq_n_u32(RTE_MBUF_F_RX_FDIR_ID);
                uint32x4_t invalid_mask;
 
-               /* Check if flow tag is non-zero then set PKT_RX_FDIR. */
+               /* Check if flow tag is non-zero then set RTE_MBUF_F_RX_FDIR. */
                invalid_mask = vceqzq_u32(flow_tag);
                ol_flags = vorrq_u32(ol_flags,
                                     vbicq_u32(fdir_flags, invalid_mask));
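
For reference, the two ACLE intrinsics carrying this hunk have these per-lane
semantics:

    vceqzq_u32(x)   /* x == 0 ? 0xFFFFFFFF : 0x00000000 */
    vbicq_u32(a, b) /* a & ~b ("bit clear") */

so vbicq_u32(fdir_flags, invalid_mask) keeps RTE_MBUF_F_RX_FDIR only in lanes
whose flow tag is non-zero, exactly as the comment says.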
@@ -593,7 +593,7 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
         * there's no instruction to count trailing zeros. __builtin_clzl() is
         * used instead.
         *
-        * A. copy 4 mbuf pointers from elts ring to returing pkts.
+        * A. copy 4 mbuf pointers from elts ring to returning pkts.
         * B. load 64B CQE and extract necessary fields
         *    Final 16bytes cqes[] extracted from original 64bytes CQE has the
         *    following structure:
@@ -767,16 +767,15 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                comp_idx = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
                                          comp_mask), 0)) /
                                          (sizeof(uint16_t) * 8);
-               /* D.6 mask out entries after the compressed CQE. */
-               mask = vcreate_u16(comp_idx < MLX5_VPMD_DESCS_PER_LOOP ?
-                                  -1UL >> (comp_idx * sizeof(uint16_t) * 8) :
-                                  0);
-               invalid_mask = vorr_u16(invalid_mask, mask);
+               invalid_mask = vorr_u16(invalid_mask, comp_mask);
                /* D.7 count non-compressed valid CQEs. */
                n = __builtin_clzl(vget_lane_u64(vreinterpret_u64_u16(
                                   invalid_mask), 0)) / (sizeof(uint16_t) * 8);
                nocmp_n += n;
-               /* D.2 get the final invalid mask. */
+               /*
+                * D.2 mask out entries after the compressed CQE.
+                *     get the final invalid mask.
+                */
                mask = vcreate_u16(n < MLX5_VPMD_DESCS_PER_LOOP ?
                                   -1UL >> (n * sizeof(uint16_t) * 8) : 0);
                invalid_mask = vorr_u16(invalid_mask, mask);
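
The functional change in this hunk: comp_mask already has all-ones lanes from
the compressed CQE onward, so it can be ORed into invalid_mask directly
instead of being rebuilt from comp_idx, and the explicit vcreate_u16() mask
is only needed afterwards to finalize invalid_mask from n. A worked example
of the clz-based count (as the comment above rxq_cq_process_v notes, this
path counts leading rather than trailing zeros, so valid entries are arranged
to sit in the high lanes):

    /*
     * If the first two CQEs are valid, invalid_mask viewed as a
     * uint64_t is 0x00000000FFFFFFFF, so
     * __builtin_clzl(0x00000000FFFFFFFFUL) == 32 and
     * n = 32 / (sizeof(uint16_t) * 8) == 2.
     */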
@@ -832,19 +831,24 @@ rxq_cq_process_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
                if (rxq->dynf_meta) {
                        /* This code is subject to further optimization. */
                        int32_t offs = rxq->flow_meta_offset;
+                       uint32_t mask = rxq->flow_meta_port_mask;
 
                        *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
-                               container_of(p0, struct mlx5_cqe,
-                                            pkt_info)->flow_table_metadata;
+                               rte_be_to_cpu_32(container_of
+                               (p0, struct mlx5_cqe,
+                               pkt_info)->flow_table_metadata) & mask;
                        *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
-                               container_of(p1, struct mlx5_cqe,
-                                            pkt_info)->flow_table_metadata;
+                               rte_be_to_cpu_32(container_of
+                               (p1, struct mlx5_cqe,
+                               pkt_info)->flow_table_metadata) & mask;
                        *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
-                               container_of(p2, struct mlx5_cqe,
-                                            pkt_info)->flow_table_metadata;
+                               rte_be_to_cpu_32(container_of
+                               (p2, struct mlx5_cqe,
+                               pkt_info)->flow_table_metadata) & mask;
                        *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
-                               container_of(p3, struct mlx5_cqe,
-                                            pkt_info)->flow_table_metadata;
+                               rte_be_to_cpu_32(container_of
+                               (p3, struct mlx5_cqe,
+                               pkt_info)->flow_table_metadata) & mask;
                        if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
                                elts[pos]->ol_flags |= rxq->flow_meta_mask;
                        if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
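
The other functional change in this final hunk: flow_table_metadata is stored
big-endian in the CQE, so it is now converted to CPU byte order and masked
with the per-port flow_meta_port_mask before landing in the mbuf dynamic
field. Per packet, in scalar form (cqe and pkt are hypothetical stand-ins for
one lane's pointers):

    uint32_t meta = rte_be_to_cpu_32(cqe->flow_table_metadata) &
                    rxq->flow_meta_port_mask;

    *RTE_MBUF_DYNFIELD(pkt, rxq->flow_meta_offset, uint32_t *) = meta;
    if (meta)
            pkt->ol_flags |= rxq->flow_meta_mask;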