#ifndef __OCTEONTX_RXTX_H__
#define __OCTEONTX_RXTX_H__
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#define OFFLOAD_FLAGS \
uint16_t rx_offload_flags; \
static __rte_always_inline uint64_t
-octeontx_pktmbuf_detach(struct rte_mbuf *m)
+octeontx_pktmbuf_detach(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
struct rte_mempool *mp = m->pool;
uint32_t mbuf_size, buf_len;
/* Update refcount of direct mbuf */
md = rte_mbuf_from_indirect(m);
+ /* The real data is in the direct buffer; report it to the caller */
+ *m_tofree = md;
refcount = rte_mbuf_refcnt_update(md, -1);
priv_size = rte_pktmbuf_priv_size(mp);
}
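+/* Returns the value for SEND_HDR[DF]/SG_DESC[I]: 0 when PKO may free the
+ * buffer, 1 when a reference is still held and PKO must not free it. When
+ * m is indirect, *m_tofree is redirected to the direct mbuf that is
+ * actually returned to its pool.
+ */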
static __rte_always_inline uint64_t
-octeontx_prefree_seg(struct rte_mbuf *m)
+octeontx_prefree_seg(struct rte_mbuf *m, struct rte_mbuf **m_tofree)
{
if (likely(rte_mbuf_refcnt_read(m) == 1)) {
if (!RTE_MBUF_DIRECT(m))
- return octeontx_pktmbuf_detach(m);
+ return octeontx_pktmbuf_detach(m, m_tofree);
m->next = NULL;
m->nb_segs = 1;
return 0;
} else if (rte_mbuf_refcnt_update(m, -1) == 0) {
if (!RTE_MBUF_DIRECT(m))
- return octeontx_pktmbuf_detach(m);
+ return octeontx_pktmbuf_detach(m, m_tofree);
rte_mbuf_refcnt_set(m, 1);
m->next = NULL;
* 0x2 - TCP L4 checksum
* 0x3 - SCTP L4 checksum
*/
- const uint8_t csum = (!(((ol_flags ^ PKT_TX_UDP_CKSUM) >> 52) & 0x3) +
- (!(((ol_flags ^ PKT_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
- (!(((ol_flags ^ PKT_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));
-
- const uint8_t is_tunnel_parsed = (!!(ol_flags & PKT_TX_TUNNEL_GTP) ||
- !!(ol_flags & PKT_TX_TUNNEL_VXLAN_GPE) ||
- !!(ol_flags & PKT_TX_TUNNEL_VXLAN) ||
- !!(ol_flags & PKT_TX_TUNNEL_GRE) ||
- !!(ol_flags & PKT_TX_TUNNEL_GENEVE) ||
- !!(ol_flags & PKT_TX_TUNNEL_IP) ||
- !!(ol_flags & PKT_TX_TUNNEL_IPIP));
-
- const uint8_t csum_outer = (!!(ol_flags & PKT_TX_OUTER_UDP_CKSUM) ||
- !!(ol_flags & PKT_TX_TUNNEL_UDP));
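+ /* RTE_MBUF_F_TX_L4_MASK occupies ol_flags bits 52-53, so XOR-ing with
+ * a given L4 flag and testing those two bits yields zero only on an
+ * exact match; csum thus encodes UDP/TCP/SCTP as 1/2/3.
+ */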
+ const uint8_t csum = (!(((ol_flags ^ RTE_MBUF_F_TX_UDP_CKSUM) >> 52) & 0x3) +
+ (!(((ol_flags ^ RTE_MBUF_F_TX_TCP_CKSUM) >> 52) & 0x3) * 2) +
+ (!(((ol_flags ^ RTE_MBUF_F_TX_SCTP_CKSUM) >> 52) & 0x3) * 3));
+
+ const uint8_t is_tunnel_parsed = (!!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GTP) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_VXLAN) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GRE) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_GENEVE) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IP) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_IPIP));
+
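+ /* Outer L4 checksum is needed when requested explicitly or for any
+ * UDP-based tunnel
+ */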
+ const uint8_t csum_outer = (!!(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) ||
+ !!(ol_flags & RTE_MBUF_F_TX_TUNNEL_UDP));
const uint8_t outer_l2_len = m->outer_l2_len;
const uint8_t l2_len = m->l2_len;
send_hdr->w0.l3ptr = outer_l2_len;
send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
/* Set ckl3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);
/* Outer L4 */
send_hdr->w0.ckl4 = csum_outer;
/* Set ckle for PKO to calculate inner IPV4 header
* checksum.
*/
- send_hdr->w0.ckle = !!(ol_flags & PKT_TX_IPV4);
+ send_hdr->w0.ckle = !!(ol_flags & RTE_MBUF_F_TX_IPV4);
/* Inner L4 */
send_hdr->w0.cklf = csum;
send_hdr->w0.l3ptr = l2_len;
send_hdr->w0.l4ptr = l2_len + m->l3_len;
/* Set ckl3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);
/* Inner L4 */
send_hdr->w0.ckl4 = csum;
send_hdr->w0.l3ptr = outer_l2_len;
send_hdr->w0.l4ptr = outer_l2_len + m->outer_l3_len;
/* Set ckl3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_OUTER_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4);
/* Outer L4 */
send_hdr->w0.ckl4 = csum_outer;
send_hdr->w0.l3ptr = l2_len;
send_hdr->w0.l4ptr = l2_len + m->l3_len;
/* Set ckl3 for PKO to calculate IPV4 header checksum */
- send_hdr->w0.ckl3 = !!(ol_flags & PKT_TX_IPV4);
+ send_hdr->w0.ckl3 = !!(ol_flags & RTE_MBUF_F_TX_IPV4);
/* Inner L4 */
send_hdr->w0.ckl4 = csum;
const uint16_t flag)
{
uint16_t gaura_id, nb_desc = 0;
+ struct rte_mbuf *m_tofree;
+ rte_iova_t iova;
+ uint16_t data_len;
+
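+ /* Snapshot data_len and the buffer iova now: octeontx_prefree_seg()
+ * may detach tx_pkt and reset these fields. m_tofree tracks the mbuf
+ * that PKO actually returns to its pool.
+ */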
+ m_tofree = tx_pkt;
+
+ data_len = tx_pkt->data_len;
+ iova = rte_mbuf_data_iova(tx_pkt);
/* Setup PKO_SEND_HDR_S */
cmd_buf[nb_desc++] = tx_pkt->data_len & 0xffff;
* not, as SG_DESC[I] and SEND_HDR[II] are clear.
*/
if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)
- cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt) <<
+ cmd_buf[0] |= (octeontx_prefree_seg(tx_pkt, &m_tofree) <<
58);
/* Mark mempool object as "put" since it is freed by PKO */
if (!(cmd_buf[0] & (1ULL << 58)))
- __mempool_check_cookies(tx_pkt->pool, (void **)&tx_pkt,
+ RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool, (void **)&m_tofree,
1, 0);
/* Get the gaura id of the pool the freed buffer returns to */
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
- tx_pkt->pool->pool_id);
+ gaura_id =
+ octeontx_fpa_bufpool_gaura((uintptr_t)m_tofree->pool->pool_id);
/* Setup PKO_SEND_BUFLINK_S */
cmd_buf[nb_desc++] = PKO_SEND_BUFLINK_SUBDC |
PKO_SEND_BUFLINK_LDTYPE(0x1ull) |
PKO_SEND_BUFLINK_GAUAR((long)gaura_id) |
- tx_pkt->data_len;
- cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+ data_len;
+ cmd_buf[nb_desc++] = iova;
return nb_desc;
}
{
uint16_t nb_segs, nb_desc = 0;
uint16_t gaura_id, len = 0;
- struct rte_mbuf *m_next = NULL;
+ struct rte_mbuf *m_next = NULL, *m_tofree;
+ rte_iova_t iova;
+ uint16_t data_len;
nb_segs = tx_pkt->nb_segs;
/* Setup PKO_SEND_HDR_S */
do {
m_next = tx_pkt->next;
- /* To handle case where mbufs belong to diff pools, like
- * fragmentation
+ /* Capture TX parameters up front, since octeontx_prefree_seg()
+ * may change them
+ */
- gaura_id = octeontx_fpa_bufpool_gpool((uintptr_t)
- tx_pkt->pool->pool_id);
+ m_tofree = tx_pkt;
+ data_len = tx_pkt->data_len;
+ iova = rte_mbuf_data_iova(tx_pkt);
/* Setup PKO_SEND_GATHER_S */
- cmd_buf[nb_desc] = PKO_SEND_GATHER_SUBDC |
- PKO_SEND_GATHER_LDTYPE(0x1ull) |
- PKO_SEND_GATHER_GAUAR((long)gaura_id) |
- tx_pkt->data_len;
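+ /* Assemble the descriptor in stages: the gaura id is known only
+ * after octeontx_prefree_seg() picks the mbuf PKO will free
+ */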
+ cmd_buf[nb_desc] = 0;
/* SG_DESC[I] bit controls if buffer is to be freed or
* not, as SEND_HDR[DF] and SEND_HDR[II] are clear.
*/
if (flag & OCCTX_TX_OFFLOAD_MBUF_NOFF_F) {
cmd_buf[nb_desc] |=
- (octeontx_prefree_seg(tx_pkt) << 57);
+ (octeontx_prefree_seg(tx_pkt, &m_tofree) << 57);
}
+ /* To handle case where mbufs belong to diff pools, like
+ * fragmentation
+ */
+ gaura_id = octeontx_fpa_bufpool_gaura((uintptr_t)
+ m_tofree->pool->pool_id);
+
+ /* Setup PKO_SEND_GATHER_S */
+ cmd_buf[nb_desc] |= PKO_SEND_GATHER_SUBDC |
+ PKO_SEND_GATHER_LDTYPE(0x1ull) |
+ PKO_SEND_GATHER_GAUAR((long)gaura_id) |
+ data_len;
+
/* Mark mempool object as "put" since it is freed by
* PKO.
*/
if (!(cmd_buf[nb_desc] & (1ULL << 57))) {
tx_pkt->next = NULL;
- __mempool_check_cookies(tx_pkt->pool,
- (void **)&tx_pkt, 1, 0);
+ RTE_MEMPOOL_CHECK_COOKIES(m_tofree->pool,
+ (void **)&m_tofree, 1, 0);
}
nb_desc++;
- cmd_buf[nb_desc++] = rte_mbuf_data_iova(tx_pkt);
+ cmd_buf[nb_desc++] = iova;
nb_segs--;
- len += tx_pkt->data_len;
+ len += data_len;
tx_pkt = m_next;
} while (nb_segs);
struct octeontx_txq *txq = tx_queue;
octeontx_dq_t *dq = &txq->dq;
uint16_t count = 0, nb_desc;
- rte_cio_wmb();
+ rte_io_wmb();
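+ /* rte_io_wmb() replaces the retired rte_cio_wmb(); it orders the
+ * preceding stores ahead of the device accesses that follow
+ */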
while (count < nb_pkts) {
if (unlikely(*((volatile int64_t *)dq->fc_status_va) < 0))
/* RX offload macros */
#define VLAN_FLTR_F OCCTX_RX_VLAN_FLTR_F
+#define CSUM_F OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F OCCTX_RX_MULTI_SEG_F
-/* [VLAN_FLTR][MULTI_SEG] */
+
+/* [VLAN_FLTR] [CSUM] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
-R(mseg, 0, 1, MULT_RX_F) \
-R(vlan, 1, 0, VLAN_FLTR_F) \
-R(vlan_mseg, 1, 1, VLAN_FLTR_F | MULT_RX_F)
+R(no_offload, 0, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
+R(mseg, 0, 0, 1, MULT_RX_F) \
+R(csum, 0, 1, 0, CSUM_F) \
+R(csum_mseg, 0, 1, 1, CSUM_F | MULT_RX_F) \
+R(vlan, 1, 0, 0, VLAN_FLTR_F) \
+R(vlan_mseg, 1, 0, 1, VLAN_FLTR_F | MULT_RX_F) \
+R(vlan_csum, 1, 1, 0, VLAN_FLTR_F | CSUM_F) \
+R(vlan_csum_mseg, 1, 1, 1, VLAN_FLTR_F | CSUM_F | MULT_RX_F)
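+
+/*
+ * Each R(name, f2, f1, f0, flags) entry is expected to stamp out a
+ * dedicated Rx burst routine. A minimal sketch of the usual expansion
+ * pattern (hypothetical prototype, shown for illustration only):
+ *
+ * #define R(name, f2, f1, f0, flags) \
+ *	uint16_t octeontx_recv_pkts_ ## name(void *rx_queue, \
+ *		struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
+ * OCCTX_RX_FASTPATH_MODES
+ * #undef R
+ */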
#endif /* __OCTEONTX_RXTX_H__ */