__mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
1, 0);
/* Get the gaura Id */
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
- tx_pkt->pool->pool_id);
+ gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)tx_pkt->pool->pool_id);
/* Setup PKO_SEND_BUFLINK_S */
cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
/* To handle case where mbufs belong to diff pools, like
* fragmentation
*/
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
+ gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
tx_pkt->pool->pool_id);
/* Setup PKO_SEND_GATHER_S */
struct octeontx_txq *txq = tx_queue;
octeontx_dq_t *dq = &txq->dq;
uint16_t count = 0, nb_desc;
- rte_cio_wmb();
+ rte_io_wmb();
while (count < nb_pkts) {
if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
/* RX offload macros */
#define VLAN_FLTR_F OCCTX_RX_VLAN_FLTR_F
+#define CSUM_F OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F OCCTX_RX_MULTI_SEG_F
-/* [VLAN_FLTR][MULTI_SEG] */
+
+/* [VLAN_FLTR] [CSUM] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
-R(mseg, 0, 1, MULT_RX_F) \
-R(vlan, 1, 0, VLAN_FLTR_F) \
-R(vlan_mseg, 1, 1, VLAN_FLTR_F | MULT_RX_F)
+R(no_offload, 0, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
+R(mseg, 0, 0, 1, MULT_RX_F) \
+R(csum, 0, 1, 0, CSUM_F) \
+R(csum_mseg, 0, 1, 1, CSUM_F | MULT_RX_F) \
+R(vlan, 1, 0, 0, VLAN_FLTR_F) \
+R(vlan_mseg, 1, 0, 1, VLAN_FLTR_F | MULT_RX_F) \
+R(vlan_csum, 1, 1, 0, VLAN_FLTR_F | CSUM_F) \
+R(vlan_csum_mseg, 1, 1, 1, CSUM_F | VLAN_FLTR_F | \
+ MULT_RX_F)
#endif /* __OCTEONTX_RXTX_H__ */