+ if (hw_comp_cons == sw_comp_cons)
+ return 0;
+
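+ /* Walk the completion ring until the SW consumer index catches
+ * up with the index last written by hardware
+ */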
+ while (sw_comp_cons != hw_comp_cons) {
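+ /* Reset per-packet offload state before parsing this CQE */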
+ ol_flags = 0;
+ packet_type = RTE_PTYPE_UNKNOWN;
+ vlan_tci = 0;
+ tpa_start_flg = false;
+ rss_hash = 0;
+
+ /* Get the CQE from the completion ring */
+ cqe =
+ (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
+ cqe_type = cqe->fast_path_regular.type;
+ PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
+
+ switch (cqe_type) {
+ case ETH_RX_CQE_TYPE_REGULAR:
+ fp_cqe = &cqe->fast_path_regular;
+ break;
+ case ETH_RX_CQE_TYPE_TPA_START:
+ cqe_start_tpa = &cqe->fast_path_tpa_start;
+ tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
+ tpa_start_flg = true;
+ /* Mark it as LRO packet */
+ ol_flags |= PKT_RX_LRO;
+ /* In split mode, seg_len is the same as len_on_first_bd,
+ * and bw_ext_bd_len_list will be empty since there are
+ * no additional buffers.
+ */
+ PMD_RX_LOG(INFO, rxq,
+ "TPA start[%d] - len_on_first_bd %d header %d"
+ " [bd_list[0] %d], [seg_len %d]\n",
+ cqe_start_tpa->tpa_agg_index,
+ rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
+ cqe_start_tpa->header_len,
+ rte_le_to_cpu_16(cqe_start_tpa->bw_ext_bd_len_list[0]),
+ rte_le_to_cpu_16(cqe_start_tpa->seg_len));
+
+ break;
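+ /* TPA continuation: more buffers for an in-flight aggregation;
+ * handled by the helper, nothing is returned for this CQE
+ */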
+ case ETH_RX_CQE_TYPE_TPA_CONT:
+ qede_rx_process_tpa_cont_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_cont);
+ goto next_cqe;
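+ /* TPA end: the aggregation is complete, deliver the head mbuf */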
+ case ETH_RX_CQE_TYPE_TPA_END:
+ qede_rx_process_tpa_end_cqe(qdev, rxq,
+ &cqe->fast_path_tpa_end);
+ tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
+ tpa_info = &rxq->tpa_info[tpa_agg_idx];
+ rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
+ goto tpa_end;
+ case ETH_RX_CQE_TYPE_SLOW_PATH:
+ PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
+ ecore_eth_cqe_completion(
+ &edev->hwfns[rxq->queue_id % edev->num_hwfns],
+ (struct eth_slow_path_rx_cqe *)cqe);
+ /* fall-thru */
+ default:
+ goto next_cqe;
+ }
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ assert(rx_mb != NULL);
+
+ /* Handle regular CQE or TPA start CQE */
+ if (!tpa_start_flg) {
+ parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
+ offset = fp_cqe->placement_offset;
+ len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
+ pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
+ vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
+ rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = fp_cqe->bitfields;
+#endif
+ } else {
+ parse_flag =
+ rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
+ offset = cqe_start_tpa->placement_offset;
+ /* seg_len = len_on_first_bd */
+ len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
+ vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
+#ifdef RTE_LIBRTE_QEDE_DEBUG_RX
+ bitfield_val = cqe_start_tpa->bitfields;
+#endif
+ rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
+ }
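+ /* Classify the packet and validate checksums from the parse
+ * flags; tunneled packets need outer and inner handling
+ */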
+ if (qede_tunn_exist(parse_flag)) {
+ PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
+ if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+
+ if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "Outer L3 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_EIP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ }
+
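+ /* The tunnel parsing flags live at different offsets in the
+ * regular and TPA-start CQE formats
+ */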
+ if (tpa_start_flg)
+ flags = cqe_start_tpa->tunnel_pars_flags.flags;
+ else
+ flags = fp_cqe->tunnel_pars_flags.flags;
+ tunn_parse_flag = flags;
+
+ /* Tunnel type */
+ packet_type =
+ qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
+
+ /* Inner header */
+ packet_type |=
+ qede_rx_cqe_to_pkt_type_inner(parse_flag);
+
+ /* Outer L3/L4 types are not available in the CQE.
+ * Need to add the offset to parse them correctly.
+ */
+ rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
+ packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+ } else {
+ packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
+ }
+
+ /* Common handling for non-tunnel packets and for the inner
+ * headers of tunneled packets.
+ */
+ if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
+ PMD_RX_LOG(ERR, rxq,
+ "L4 csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
+ PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
+ parse_flag);
+ rxq->rx_hw_errors++;
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ } else {
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ }