const struct rte_flow_action_set_meta *conf,
struct rte_flow_error *error)
{
- uint32_t data = conf->data;
- uint32_t mask = conf->mask;
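+ /* The action value and mask are kept in big-endian, as the conversion routine below expects. */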
+ uint32_t mask = rte_cpu_to_be_32(conf->mask);
+ uint32_t data = rte_cpu_to_be_32(conf->data) & mask;
struct rte_flow_item item = {
.spec = &data,
.mask = &mask,
if (reg < 0)
return reg;
MLX5_ASSERT(reg != REG_NON);
- /*
- * In datapath code there is no endianness
- * coversions for perfromance reasons, all
- * pattern conversions are done in rte_flow.
- */
if (reg == REG_C_0) {
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
- uint32_t shl_c0;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
- MLX5_ASSERT(msk_c0);
-#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
- shl_c0 = rte_bsf32(msk_c0);
-#else
- shl_c0 = sizeof(msk_c0) * CHAR_BIT - rte_fls_u32(msk_c0);
-#endif
- mask <<= shl_c0;
- data <<= shl_c0;
- MLX5_ASSERT(!(~msk_c0 & rte_cpu_to_be_32(mask)));
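+ /* REG_C_0 is shared; move the value and mask to the META sub-field defined by dv_regc0_mask and keep them big-endian. */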
+ data = rte_cpu_to_be_32(rte_cpu_to_be_32(data) << shl_c0);
+ mask = rte_cpu_to_be_32(mask) & msk_c0;
+ mask = rte_cpu_to_be_32(mask << shl_c0);
}
reg_c_x[0] = (struct field_modify_info){4, 0, reg_to_field[reg]};
/* The routine expects parameters in memory as big-endian ones. */
if (reg < 0)
return;
MLX5_ASSERT(reg != REG_NON);
- /*
- * In datapath code there is no endianness
- * coversions for perfromance reasons, all
- * pattern conversions are done in rte_flow.
- */
- value = rte_cpu_to_be_32(value);
- mask = rte_cpu_to_be_32(mask);
if (reg == REG_C_0) {
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t msk_c0 = priv->sh->dv_regc0_mask;
uint32_t shl_c0 = rte_bsf32(msk_c0);
-#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
- uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
- value >>= shr_c0;
- mask >>= shr_c0;
-#endif
- value <<= shl_c0;
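+ /* REG_C_0 is shared; limit the mask to the META bits and shift both value and mask into their position in the register. */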
+ mask &= msk_c0;
mask <<= shl_c0;
- MLX5_ASSERT(msk_c0);
- MLX5_ASSERT(!(~msk_c0 & mask));
+ value <<= shl_c0;
}
flow_dv_match_meta_reg(matcher, key, reg, value, mask);
}
}
}
if (rxq->dynf_meta) {
- uint32_t meta = cqe->flow_table_metadata &
- rxq->flow_meta_port_mask;
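+ /* CQE metadata is big-endian; convert it and apply the per-port shift and mask. */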
+ uint32_t meta = (rte_be_to_cpu_32(cqe->flow_table_metadata) >>
+ __builtin_popcount(rxq->flow_meta_port_mask)) &
+ rxq->flow_meta_port_mask;
if (meta) {
pkt->ol_flags |= rxq->flow_meta_mask;
if (rxq->dynf_meta) {
uint64_t flag = rxq->flow_meta_mask;
int32_t offs = rxq->flow_meta_offset;
- uint32_t metadata, mask;
+ uint32_t mask = rxq->flow_meta_port_mask;
+ uint32_t shift =
+ __builtin_popcount(rxq->flow_meta_port_mask);
+ uint32_t metadata;
- mask = rxq->flow_meta_port_mask;
/* This code is subject to further optimization. */
- metadata = cq[pos].flow_table_metadata & mask;
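+ /* Convert the big-endian CQE metadata and apply the per-port shift and mask for each of the four packets. */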
+ metadata = (rte_be_to_cpu_32
+ (cq[pos].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
metadata;
pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
- metadata = cq[pos + 1].flow_table_metadata & mask;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos + 1].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
metadata;
pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
- metadata = cq[pos + 2].flow_table_metadata & mask;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos + 2].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
metadata;
pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
- metadata = cq[pos + 3].flow_table_metadata & mask;
+ metadata = (rte_be_to_cpu_32
+ (cq[pos + 3].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
metadata;
pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
/* This code is subject to further optimization. */
int32_t offs = rxq->flow_meta_offset;
uint32_t mask = rxq->flow_meta_port_mask;
+ uint32_t shift =
+ __builtin_popcount(rxq->flow_meta_port_mask);
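+ /* Extract metadata per packet: swap to CPU order, then shift and mask to the per-port bits. */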
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
- container_of(p0, struct mlx5_cqe,
- pkt_info)->flow_table_metadata &
- mask;
+ (rte_be_to_cpu_32(container_of
+ (p0, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
- container_of(p1, struct mlx5_cqe,
- pkt_info)->flow_table_metadata &
- mask;
+ (rte_be_to_cpu_32(container_of
+ (p1, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
- container_of(p2, struct mlx5_cqe,
- pkt_info)->flow_table_metadata &
- mask;
+ (rte_be_to_cpu_32(container_of
+ (p2, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
- container_of(p3, struct mlx5_cqe,
- pkt_info)->flow_table_metadata &
- mask;
+ (rte_be_to_cpu_32(container_of
+ (p3, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata) >> shift) &
+ mask;
if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
elts[pos]->ol_flags |= rxq->flow_meta_mask;
if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))
/* This code is subject to further optimization. */
int32_t offs = rxq->flow_meta_offset;
uint32_t mask = rxq->flow_meta_port_mask;
+ uint32_t shift =
+ __builtin_popcount(rxq->flow_meta_port_mask);
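+ /* Metadata extraction: swap the CQE field to CPU order, then shift and apply the per-port mask. */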
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
- cq[pos].flow_table_metadata & mask;
+ (rte_be_to_cpu_32
+ (cq[pos].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
- cq[pos + p1].flow_table_metadata & mask;
+ (rte_be_to_cpu_32
+ (cq[pos + p1].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
- cq[pos + p2].flow_table_metadata & mask;
+ (rte_be_to_cpu_32
+ (cq[pos + p2].flow_table_metadata) >> shift) &
+ mask;
*RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
- cq[pos + p3].flow_table_metadata & mask;
+ (rte_be_to_cpu_32
+ (cq[pos + p3].flow_table_metadata) >> shift) &
+ mask;
if (*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *))
pkts[pos]->ol_flags |= rxq->flow_meta_mask;
if (*RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *))