+ struct qede_agg_info *tpa_info;
+ struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
+ uint16_t cons_idx;
+
+ /* Under certain conditions the FW may not consume an additional or
+ * new BD, so the decision to consume the BD must be made based on
+ * len_list[0].
+ */
+ if (rte_le_to_cpu_16(len)) {
+ tpa_info = &rxq->tpa_info[agg_index];
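+ /* The buffer at the current SW consumer index holds this segment's data */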
+ cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
+ assert(curr_frag);
+ curr_frag->nb_segs = 1;
+ curr_frag->pkt_len = rte_le_to_cpu_16(len);
+ curr_frag->data_len = curr_frag->pkt_len;
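+ /* Append the filled segment to the end of the aggregation chain */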
+ tpa_info->tpa_tail->next = curr_frag;
+ tpa_info->tpa_tail = curr_frag;
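+ /* Advance the SW consumer and post a replacement RX buffer */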
+ qede_rx_bd_ring_consume(rxq);
+ if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
+ PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
+ rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
+ rxq->rx_alloc_errors++;
+ }
+ }
+}
+
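+ /* Handle a TPA continuation CQE: append the buffer described by
+ * len_list[0] to the aggregation identified by tpa_agg_index.
+ */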
+static inline void
+qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_cont_cqe *cqe)
+{
+ PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
+ cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
+ /* Only len_list[0] will carry a value */
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+}
+
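+ /* Handle a TPA end CQE: consume the final segment, if any, and set the
+ * total packet length and segment count on the head of the chain.
+ */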
+static inline void
+qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
+ struct qede_rx_queue *rxq,
+ struct eth_fast_path_rx_tpa_end_cqe *cqe)
+{
+ struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
+
+ qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
+ cqe->len_list[0]);
+ /* Update total length and frags based on end TPA */
+ rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
+ /* TODO: Add Sanity Checks */
+ rx_mb->nb_segs = cqe->num_of_bds;
+ rx_mb->pkt_len = rte_le_to_cpu_16(cqe->total_packet_len);
+
+ PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
+ " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
+ rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
+ rx_mb->pkt_len);
+}
+
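+ /* Translate the CQE tunnel parsing flags (tunnel type and next protocol)
+ * into an rte_mbuf packet type.
+ */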
+static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
+{
+ uint32_t val;
+
+ /* Lookup table */
+ static const uint32_t
+ ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
+ [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
+ [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE,
+ [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
+ RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
+ RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
+ [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
+ RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
+ };
+
+ /* Mask bits [4:0], which carry the tunnel type and next protocol */
+ val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
+ (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
+
+ if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
+ return ptype_tunn_lkup_tbl[val];
+ else
+ return RTE_PTYPE_UNKNOWN;
+}
+
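+ /* Chain the remaining num_segs RX buffers onto rx_mb to build a
+ * scattered (jumbo) packet. Returns 0 on success, or -EINVAL after
+ * recycling the BDs if a zero-length segment is encountered.
+ */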
+static inline int
+qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
+ uint8_t num_segs, uint16_t pkt_len)
+{
+ struct qede_rx_queue *rxq = p_rxq;
+ struct qede_dev *qdev = rxq->qdev;
+ register struct rte_mbuf *seg1 = NULL;
+ register struct rte_mbuf *seg2 = NULL;
+ uint16_t sw_rx_index;
+ uint16_t cur_size;
+
+ seg1 = rx_mb;
+ while (num_segs) {
+ cur_size = RTE_MIN(pkt_len, rxq->rx_buf_size);
+ if (unlikely(!cur_size)) {
+ PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
+ " left for mapping jumbo\n", num_segs);
+ qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
+ return -EINVAL;
+ }
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
+ seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
+ qede_rx_bd_ring_consume(rxq);
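+ /* Fill this segment and link it after the previous one */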
+ pkt_len -= cur_size;
+ seg2->data_len = cur_size;
+ seg1->next = seg2;
+ seg1 = seg1->next;
+ num_segs--;
+ rxq->rx_segs++;
+ }
+
+ return 0;