X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxtx_vec_neon.h;h=e11565f696562c4ef62285047249efa46788e8ac;hb=3fc8de4f8df4a5f9ca23b0bc2d1ab592719c5daf;hp=6dd18b61957cfada4ac2d027420b06280ecd6ff2;hpb=570acdb1da8ade1a618a736b5db35f3a1379ff71;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
index 6dd18b6195..e11565f696 100644
--- a/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
+++ b/drivers/net/mlx5/mlx5_rxtx_vec_neon.h
@@ -60,7 +60,7 @@
  * @param txq
  *   Pointer to TX queue structure.
  * @param dseg
- *   Pointer to buffer descriptor to be writen.
+ *   Pointer to buffer descriptor to be written.
  * @param pkts
  *   Pointer to array of packets to be sent.
  * @param n
@@ -135,6 +135,8 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,

 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	if (unlikely(!pkts_n))
 		return 0;
 	for (n = 0; n < pkts_n; ++n) {
@@ -149,7 +151,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 			11, 10, 9, 8, /* bswap32 */
 			12, 13, 14, 15
 		};
-		uint8_t cs_flags = 0;
+		uint8_t cs_flags;
 		uint16_t max_elts;
 		uint16_t max_wqe;
 		uint8x16_t *t_wqe;
@@ -168,22 +170,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 			break;
 		wqe = &((volatile struct mlx5_wqe64 *)
			txq->wqes)[wqe_ci & wq_mask].hdr;
-		if (buf->ol_flags &
-		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			const uint64_t is_tunneled =
-				buf->ol_flags & (PKT_TX_TUNNEL_GRE |
-						 PKT_TX_TUNNEL_VXLAN);
-
-			if (is_tunneled && txq->tunnel_en) {
-				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
-					   MLX5_ETH_WQE_L4_INNER_CSUM;
-				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
-					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
-			} else {
-				cs_flags = MLX5_ETH_WQE_L3_CSUM |
-					   MLX5_ETH_WQE_L4_CSUM;
-			}
-		}
+		cs_flags = txq_ol_cksum_to_cs(txq, buf);
 		/* Title WQEBB pointer. */
 		t_wqe = (uint8x16_t *)wqe;
 		dseg = (uint8_t *)(wqe + 1);
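
The checksum-offload conversion deleted in the hunk above is replaced by a single call to txq_ol_cksum_to_cs(). That helper is not shown in this diff; below is a minimal sketch reconstructed from the removed lines (the real definition lives elsewhere in the mlx5 PMD and may differ in placement and attributes):

/*
 * Sketch only: behavior inferred from the inline block removed above.
 * Converts mbuf TX offload flags into WQE checksum flags.
 */
static __rte_always_inline uint8_t
txq_ol_cksum_to_cs(struct mlx5_txq_data *txq, struct rte_mbuf *buf)
{
	uint8_t cs_flags = 0;

	if (buf->ol_flags &
	    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
		const uint64_t is_tunneled =
			buf->ol_flags &
			(PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN);

		if (is_tunneled && txq->tunnel_en) {
			/* Checksum the inner headers of tunneled packets. */
			cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
				   MLX5_ETH_WQE_L4_INNER_CSUM;
			if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
				cs_flags |= MLX5_ETH_WQE_L3_CSUM;
		} else {
			/* Plain L3/L4 checksum offload. */
			cs_flags = MLX5_ETH_WQE_L3_CSUM |
				   MLX5_ETH_WQE_L4_CSUM;
		}
	}
	return cs_flags;
}

Factoring this out lets the NEON, SSE, and scalar Tx paths share one conversion instead of carrying three copies of the same logic.
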
@@ -220,7 +207,9 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
 		wqe->ctrl[2] = rte_cpu_to_be_32(8);
 		wqe->ctrl[3] = txq->elts_head;
 		txq->elts_comp = 0;
+#ifndef NDEBUG
 		++txq->cq_pi;
+#endif
 	}
 #ifdef MLX5_PMD_SOFT_COUNTERS
 	txq->stats.opackets += n;
@@ -233,7 +222,7 @@ txq_scatter_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts,
  * Send burst of packets with Enhanced MPW. If it encounters a multi-seg packet,
  * it returns to make it processed by txq_scatter_v(). All the packets in
  * the pkts list should be single segment packets having same offload flags.
- * This must be checked by txq_check_multiseg() and txq_calc_offload().
+ * This must be checked by txq_count_contig_single_seg() and txq_calc_offload().
  *
  * @param txq
  *   Pointer to TX queue structure.
@@ -284,6 +273,8 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	assert(elts_n > pkts_n);
 	mlx5_tx_complete(txq);
 	max_elts = (elts_n - (elts_head - txq->elts_tail));
+	/* A CQE slot must always be available. */
+	assert((1u << txq->cqe_n) - (txq->cq_pi - txq->cq_ci));
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
 	pkts_n = RTE_MIN((unsigned int)RTE_MIN(pkts_n, max_wqe), max_elts);
 	if (unlikely(!pkts_n))
@@ -321,7 +312,9 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	} else {
 		/* Request a completion. */
 		txq->elts_comp = 0;
+#ifndef NDEBUG
 		++txq->cq_pi;
+#endif
 		comp_req = 8;
 	}
 	/* Fill CTRL in the header. */
@@ -345,7 +338,7 @@ txq_burst_v(struct mlx5_txq_data *txq, struct rte_mbuf **pkts, uint16_t pkts_n,
 	txq->wqe_ci += (nb_dword_in_hdr + pkts_n + (nb_dword_per_wqebb - 1)) /
 		       nb_dword_per_wqebb;
 	/* Ring QP doorbell. */
-	mlx5_tx_dbrec(txq, wqe);
+	mlx5_tx_dbrec_cond_wmb(txq, wqe, pkts_n < MLX5_VPMD_TX_MAX_BURST);
 	return pkts_n;
 }
@@ -445,13 +438,6 @@ rxq_cq_decompress_v(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cq,
 		31, 30 /* 4th mCQE */
 	};

-	/*
-	 * Not to overflow elts array. Decompress next time after mbuf
-	 * replenishment.
-	 */
-	if (unlikely(mcqe_n + MLX5_VPMD_DESCS_PER_LOOP >
-		     (uint16_t)(rxq->rq_ci - rxq->cq_ci)))
-		return;
 	/*
 	 * A. load mCQEs into a 128bit register.
 	 * B. store rearm data to mbuf.
@@ -573,11 +559,13 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 {
 	uint16x4_t ptype;
 	uint32x4_t pinfo, cv_flags;
-	uint32x4_t ol_flags = vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH);
+	uint32x4_t ol_flags =
+		vdupq_n_u32(rxq->rss_hash * PKT_RX_RSS_HASH |
+			    rxq->hw_timestamp * PKT_RX_TIMESTAMP);
 	const uint32x4_t ptype_ol_mask = { 0x106, 0x106, 0x106, 0x106 };
 	const uint8x16_t cv_flag_sel = {
 		0,
-		(uint8_t)(PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED),
+		(uint8_t)(PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED),
 		(uint8_t)(PKT_RX_IP_CKSUM_GOOD >> 1),
 		0,
 		(uint8_t)(PKT_RX_L4_CKSUM_GOOD >> 1),
@@ -587,7 +575,7 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 	};
 	const uint32x4_t cv_mask =
 		vdupq_n_u32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD |
-			    PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+			    PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
 	const uint64x1_t mbuf_init = vld1_u64(&rxq->mbuf_initializer);
 	const uint64x1_t r32_mask = vcreate_u64(0xffffffff);
 	uint64x2_t rearm0, rearm1, rearm2, rearm3;
@@ -595,11 +583,15 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
 	if (rxq->mark) {
 		const uint32x4_t ft_def = vdupq_n_u32(MLX5_FLOW_MARK_DEFAULT);
 		const uint32x4_t fdir_flags = vdupq_n_u32(PKT_RX_FDIR);
-		const uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+		uint32x4_t fdir_id_flags = vdupq_n_u32(PKT_RX_FDIR_ID);
+		uint32x4_t invalid_mask;

 		/* Check if flow tag is non-zero then set PKT_RX_FDIR. */
-		ol_flags = vorrq_u32(ol_flags, vbicq_u32(fdir_flags,
-							 vceqzq_u32(flow_tag)));
+		invalid_mask = vceqzq_u32(flow_tag);
+		ol_flags = vorrq_u32(ol_flags,
+				     vbicq_u32(fdir_flags, invalid_mask));
+		/* Mask out invalid entries. */
+		fdir_id_flags = vbicq_u32(fdir_id_flags, invalid_mask);
 		/* Check if flow tag MLX5_FLOW_MARK_DEFAULT. */
 		ol_flags = vorrq_u32(ol_flags,
 				     vbicq_u32(fdir_id_flags,
@@ -670,12 +662,16 @@ rxq_cq_to_ptype_oflags_v(struct mlx5_rxq_data *rxq,
  *   Array to store received packets.
  * @param pkts_n
  *   Maximum number of packets in array.
+ * @param[out] err
+ *   Pointer to a flag. Set non-zero value if pkts array has at least one error
+ *   packet to handle.
  *
  * @return
  *   Number of packets received including errors (<= pkts_n).
  */
 static inline uint16_t
-rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n,
+	    uint64_t *err)
 {
 	const uint16_t q_n = 1 << rxq->cqe_n;
 	const uint16_t q_mask = q_n - 1;
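
With the signature change above, rxq_burst_v() reports Rx errors through the err output parameter instead of a rxq->pending_err field. A sketch of the expected caller, assuming the mlx5_rx_burst_vec() and rxq_handle_pending_error() entry points from the companion mlx5_rxtx_vec.c (names are not confirmed by this diff):

uint16_t
mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
{
	struct mlx5_rxq_data *rxq = dpdk_rxq;
	uint16_t nb_rx;
	uint64_t err = 0;	/* set non-zero if any CQE carries an error */

	nb_rx = rxq_burst_v(rxq, pkts, pkts_n, &err);
	if (unlikely(err))
		/* Assumed recovery helper: filters out error packets. */
		nb_rx = rxq_handle_pending_error(rxq, pkts, nb_rx);
	return nb_rx;
}

Passing the flag on the stack keeps the hot loop from dirtying an extra queue field on every burst.
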
@@ -778,10 +774,8 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	}
 	elts_idx = rxq->rq_pi & q_mask;
 	elts = &(*rxq->elts)[elts_idx];
-	pkts_n = RTE_MIN(pkts_n - rcvd_pkt,
-			 (uint16_t)(rxq->rq_ci - rxq->cq_ci));
-	/* Not to overflow pkts/elts array. */
-	pkts_n = RTE_ALIGN_FLOOR(pkts_n, MLX5_VPMD_DESCS_PER_LOOP);
+	/* Not to overflow pkts array. */
+	pkts_n = RTE_ALIGN_FLOOR(pkts_n - rcvd_pkt, MLX5_VPMD_DESCS_PER_LOOP);
 	/* Not to cross queue end. */
 	pkts_n = RTE_MIN(pkts_n, q_n - elts_idx);
 	if (!pkts_n)
@@ -977,11 +971,28 @@ rxq_burst_v(struct mlx5_rxq_data *rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		opcode = vceq_u16(resp_err_check, opcode);
 		opcode = vbic_u16(opcode, invalid_mask);
 		/* D.4 mark if any error is set */
-		rxq->pending_err |=
-			!!vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
+		*err |= vget_lane_u64(vreinterpret_u64_u16(opcode), 0);
 		/* C.4 fill in mbuf - rearm_data and packet_type. */
 		rxq_cq_to_ptype_oflags_v(rxq, ptype_info, flow_tag,
 					 opcode, &elts[pos]);
+		if (rxq->hw_timestamp) {
+			elts[pos]->timestamp =
+				rte_be_to_cpu_64(
+					container_of(p0, struct mlx5_cqe,
+						     pkt_info)->timestamp);
+			elts[pos + 1]->timestamp =
+				rte_be_to_cpu_64(
+					container_of(p1, struct mlx5_cqe,
+						     pkt_info)->timestamp);
+			elts[pos + 2]->timestamp =
+				rte_be_to_cpu_64(
+					container_of(p2, struct mlx5_cqe,
+						     pkt_info)->timestamp);
+			elts[pos + 3]->timestamp =
+				rte_be_to_cpu_64(
+					container_of(p3, struct mlx5_cqe,
+						     pkt_info)->timestamp);
+		}
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Add up received bytes count. */
 		byte_cnt = vbic_u16(byte_cnt, invalid_mask);
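
With rxq->hw_timestamp enabled, the hunk above copies each CQE timestamp into mbuf->timestamp, and rxq_cq_to_ptype_oflags_v() raises PKT_RX_TIMESTAMP in ol_flags. A minimal application-side sketch of reading the value back, using only the standard rte_mbuf API (the dump helper itself is illustrative):

#include <stdio.h>
#include <inttypes.h>
#include <rte_mbuf.h>

/* Sketch: print HW Rx timestamps for one received burst. */
static void
dump_rx_timestamps(struct rte_mbuf **pkts, uint16_t nb_rx)
{
	uint16_t i;

	for (i = 0; i < nb_rx; ++i) {
		if (!(pkts[i]->ol_flags & PKT_RX_TIMESTAMP))
			continue;	/* no HW timestamp on this packet */
		printf("pkt %u: timestamp %" PRIu64 "\n",
		       i, pkts[i]->timestamp);
	}
}
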