net/mlx5: update LRO fields in completion entry
Author: Matan Azrad <matan@mellanox.com>
Mon, 22 Jul 2019 14:52:21 +0000 (14:52 +0000)
Committer: Ferruh Yigit <ferruh.yigit@intel.com>
Tue, 23 Jul 2019 12:31:36 +0000 (14:31 +0200)
Update the CQE structure to include LRO fields.

Some reserved fields were changed by this layout update, hence the
data-path code that accessed those reserved fields was updated
accordingly to use the new field names and offsets.

Signed-off-by: Matan Azrad <matan@mellanox.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@mellanox.com>
drivers/net/mlx5/mlx5_prm.h
drivers/net/mlx5/mlx5_rxtx_vec.h
drivers/net/mlx5/mlx5_rxtx_vec_sse.h

index b0e281f..3f73a28 100644 (file)
@@ -317,13 +317,19 @@ struct mlx5_cqe {
        uint8_t pkt_info;
        uint8_t rsvd0;
        uint16_t wqe_id;
-       uint8_t rsvd3[8];
+       uint8_t lro_tcppsh_abort_dupack;
+       uint8_t lro_min_ttl;
+       uint16_t lro_tcp_win;
+       uint32_t lro_ack_seq_num;
        uint32_t rx_hash_res;
        uint8_t rx_hash_type;
-       uint8_t rsvd1[11];
+       uint8_t rsvd1[3];
+       uint16_t csum;
+       uint8_t rsvd2[6];
        uint16_t hdr_type_etc;
        uint16_t vlan_info;
-       uint8_t rsvd2[12];
+       uint8_t lro_num_seg;
+       uint8_t rsvd3[11];
        uint32_t byte_cnt;
        uint64_t timestamp;
        uint32_t sop_drop_qpn;
index 4220b08..b54ff72 100644 (file)
@@ -60,13 +60,11 @@ S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, pkt_info) == 0);
 #endif
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rx_hash_res) ==
                  offsetof(struct mlx5_cqe, pkt_info) + 12);
-S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) +
-                 sizeof(((struct mlx5_cqe *)0)->rsvd1) ==
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd1) + 11 ==
                  offsetof(struct mlx5_cqe, hdr_type_etc));
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, vlan_info) ==
                  offsetof(struct mlx5_cqe, hdr_type_etc) + 2);
-S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, rsvd2) +
-                 sizeof(((struct mlx5_cqe *)0)->rsvd2) ==
+S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, lro_num_seg) + 12 ==
                  offsetof(struct mlx5_cqe, byte_cnt));
 S_ASSERT_MLX5_CQE(offsetof(struct mlx5_cqe, sop_drop_qpn) ==
                  RTE_ALIGN(offsetof(struct mlx5_cqe, sop_drop_qpn), 8));
index 7bd254f..ca8ed41 100644 (file)
@@ -533,12 +533,12 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos + p2]);
                cqes[3] = _mm_blendv_epi8(cqes[3], cqe_tmp2, blend_mask);
                cqes[2] = _mm_blendv_epi8(cqes[2], cqe_tmp1, blend_mask);
-               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].rsvd1[3]);
-               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].rsvd1[3]);
+               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p3].csum);
+               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos + p2].csum);
                cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x30);
                cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x30);
-               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd2[10]);
-               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd2[10]);
+               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p3].rsvd3[9]);
+               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos + p2].rsvd3[9]);
                cqes[3] = _mm_blend_epi16(cqes[3], cqe_tmp2, 0x04);
                cqes[2] = _mm_blend_epi16(cqes[2], cqe_tmp1, 0x04);
                /* C.2 generate final structure for mbuf with swapping bytes. */
@@ -560,12 +560,12 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
                cqe_tmp1 = _mm_load_si128((__m128i *)&cq[pos]);
                cqes[1] = _mm_blendv_epi8(cqes[1], cqe_tmp2, blend_mask);
                cqes[0] = _mm_blendv_epi8(cqes[0], cqe_tmp1, blend_mask);
-               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].rsvd1[3]);
-               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].rsvd1[3]);
+               cqe_tmp2 = _mm_loadu_si128((__m128i *)&cq[pos + p1].csum);
+               cqe_tmp1 = _mm_loadu_si128((__m128i *)&cq[pos].csum);
                cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x30);
                cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x30);
-               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd2[10]);
-               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd2[10]);
+               cqe_tmp2 = _mm_loadl_epi64((__m128i *)&cq[pos + p1].rsvd3[9]);
+               cqe_tmp1 = _mm_loadl_epi64((__m128i *)&cq[pos].rsvd3[9]);
                cqes[1] = _mm_blend_epi16(cqes[1], cqe_tmp2, 0x04);
                cqes[0] = _mm_blend_epi16(cqes[0], cqe_tmp1, 0x04);
                /* C.2 generate final structure for mbuf with swapping bytes. */