uint16_t hdr_type_etc;
uint16_t vlan_info;
uint8_t lro_num_seg;
- uint8_t rsvd3[11];
+ uint8_t rsvd3[3];
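+ /* Flow metadata carved out of the former rsvd3[11]; the 3 + 4 + 4 byte
+  * split keeps the surrounding field offsets intact.
+  */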
+ uint32_t flow_table_metadata;
+ uint8_t rsvd4[4];
uint32_t byte_cnt;
uint64_t timestamp;
uint32_t sop_drop_qpn;
uint16_t wqe_counter;
- uint8_t rsvd4;
+ uint8_t rsvd5;
uint8_t op_own;
};
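+ /* Sketch, not in the original change: a compile-time guard against layout
+  * drift, assuming the canonical 64-byte mlx5 CQE; the rsvd3[3] +
+  * flow_table_metadata + rsvd4[4] split must not alter sizeof().
+  */
+ _Static_assert(sizeof(struct mlx5_cqe) == 64,
+  "mlx5 CQE layout must remain 64 bytes");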
#include <rte_branch_prediction.h>
#include <rte_ether.h>
#include <rte_cycles.h>
+#include <rte_flow.h>
#include "mlx5.h"
#include "mlx5_utils.h"
pkt->hash.fdir.hi = mlx5_flow_mark_get(mark);
}
}
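+ /* Copy the 32-bit CQE flow metadata into the mbuf dynamic field and set
+  * the matching flag, but only when the application has registered the
+  * field and the value is non-zero.
+  */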
+ if (rte_flow_dynf_metadata_avail() && cqe->flow_table_metadata) {
+ pkt->ol_flags |= PKT_RX_DYNF_METADATA;
+ *RTE_FLOW_DYNF_METADATA(pkt) = cqe->flow_table_metadata;
+ }
if (rxq->csum)
pkt->ol_flags |= rxq_cq_to_ol_flags(cqe);
if (rxq->vlan_strip &&
vec_cmpeq((vector unsigned int)flow_tag,
(vector unsigned int)pinfo_ft_mask)));
}
-
/*
* Merge the two fields to generate the following:
* bit[1] = l3_ok
pkts[pos + 3]->timestamp =
rte_be_to_cpu_64(cq[pos + p3].timestamp);
}
-
+ if (rte_flow_dynf_metadata_avail()) {
+ uint64_t flag = rte_flow_dynf_metadata_mask;
+ int offs = rte_flow_dynf_metadata_offs;
+ uint32_t metadata;
+
+ /* This code is subject to further optimization. */
+ metadata = cq[pos].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
+ metadata;
+ pkts[pos]->ol_flags |= metadata ? flag : 0ULL;
+ metadata = cq[pos + 1].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos + 1], offs, uint32_t *) =
+ metadata;
+ pkts[pos + 1]->ol_flags |= metadata ? flag : 0ULL;
+ metadata = cq[pos + 2].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos + 2], offs, uint32_t *) =
+ metadata;
+ pkts[pos + 2]->ol_flags |= metadata ? flag : 0ULL;
+ metadata = cq[pos + 3].flow_table_metadata;
+ *RTE_MBUF_DYNFIELD(pkts[pos + 3], offs, uint32_t *) =
+ metadata;
+ pkts[pos + 3]->ol_flags |= metadata ? flag : 0ULL;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = vec_perm(op_own, zero, len_shuf_mask);
container_of(p3, struct mlx5_cqe,
pkt_info)->timestamp);
}
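+ /* p0..p3 point at each CQE's pkt_info member, so container_of()
+  * recovers the enclosing CQE before reading flow_table_metadata.
+  */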
+ if (rte_flow_dynf_metadata_avail()) {
+ /* This code is subject to further optimization. */
+ *RTE_FLOW_DYNF_METADATA(elts[pos]) =
+ container_of(p0, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 1]) =
+ container_of(p1, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 2]) =
+ container_of(p2, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(elts[pos + 3]) =
+ container_of(p3, struct mlx5_cqe,
+ pkt_info)->flow_table_metadata;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos]))
+ elts[pos]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 1]))
+ elts[pos + 1]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 2]))
+ elts[pos + 2]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(elts[pos + 3]))
+ elts[pos + 3]->ol_flags |= PKT_RX_DYNF_METADATA;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = vbic_u16(byte_cnt, invalid_mask);
cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum);
cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
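+ /* rsvd4[2] sits at the same byte offset the old rsvd3[9] occupied
+  * (5 + 3 + 4 + 2 == 5 + 9), so the loaded bytes are unchanged.
+  */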
- cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd3[9]);
- cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd3[9]);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd4[2]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd4[2]);
cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
/* C.2 generate final structure for mbuf with swapping bytes. */
cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum);
cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
- cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd3[9]);
- cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd3[9]);
+ cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd4[2]);
+ cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd4[2]);
cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
/* C.2 generate final structure for mbuf with swapping bytes. */
pkts[pos + 3]->timestamp =
rte_be_to_cpu_64(cq[pos + p3].timestamp);
}
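+ /* Scalar copies of the CQE metadata into the dynamic mbuf field;
+  * only non-zero values raise PKT_RX_DYNF_METADATA.
+  */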
+ if (rte_flow_dynf_metadata_avail()) {
+ /* This code is subject to further optimization. */
+ *RTE_FLOW_DYNF_METADATA(pkts[pos]) =
+ cq[pos].flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(pkts[pos + 1]) =
+ cq[pos + p1].flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(pkts[pos + 2]) =
+ cq[pos + p2].flow_table_metadata;
+ *RTE_FLOW_DYNF_METADATA(pkts[pos + 3]) =
+ cq[pos + p3].flow_table_metadata;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos]))
+ pkts[pos]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 1]))
+ pkts[pos + 1]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 2]))
+ pkts[pos + 2]->ol_flags |= PKT_RX_DYNF_METADATA;
+ if (*RTE_FLOW_DYNF_METADATA(pkts[pos + 3]))
+ pkts[pos + 3]->ol_flags |= PKT_RX_DYNF_METADATA;
+ }
#ifdef MLX5_PMD_SOFT_COUNTERS
/* Add up received bytes count. */
byte_cnt = _mm_shuffle_epi8(op_own, len_shuf_mask);
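/*
 * Usage sketch (illustrative, not part of this patch): none of the copies
 * above run until the application registers the metadata dynamic field,
 * e.g. at startup:
 *
 *     #include <rte_flow.h>
 *
 *     if (rte_flow_dynf_metadata_register() < 0)
 *         rte_exit(EXIT_FAILURE, "cannot register metadata dynfield\n");
 *
 * Afterwards rte_flow_dynf_metadata_avail() returns true and received
 * mbufs carry the value via RTE_FLOW_DYNF_METADATA(mbuf) whenever
 * PKT_RX_DYNF_METADATA is set in ol_flags.
 */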