NIX_TX_FASTPATH_MODES
#undef T
+
static inline void
pick_tx_func(struct rte_eth_dev *eth_dev,
const eth_tx_burst_t tx_burst[2][2][2][2][2]);

void
cn9k_eth_set_tx_function(struct rte_eth_dev *eth_dev)
{
+ struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+
const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2] = {
#define T(name, f4, f3, f2, f1, f0, sz, flags) \
[f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_##name,

NIX_TX_FASTPATH_MODES
#undef T
};
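+ /* Multi-seg variants, indexed by the same offload flags as above */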
+ const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2] = {
+#define T(name, f4, f3, f2, f1, f0, sz, flags) \
+ [f4][f3][f2][f1][f0] = cn9k_nix_xmit_pkts_mseg_##name,
+
+ NIX_TX_FASTPATH_MODES
+#undef T
+ };
+
pick_tx_func(eth_dev, nix_eth_tx_burst);
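+
+ /* Prefer the multi-seg variant when multi-segment Tx is enabled */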
+ if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+ pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
+
rte_mb();
}
return roc_lmt_submit_ldeorl(io_addr);
}
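+
+/* Build SG subdescriptors from the mbuf chain; returns the command size
+ * in 16B units (segdw); send_hdr w0.sizem1 is set to segdw - 1.
+ */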
+static __rte_always_inline uint16_t
+cn9k_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
+{
+ struct nix_send_hdr_s *send_hdr;
+ union nix_send_sg_s *sg;
+ struct rte_mbuf *m_next;
+ uint64_t *slist, sg_u;
+ uint64_t nb_segs;
+ uint64_t segdw;
+ uint8_t off, i;
+
+ send_hdr = (struct nix_send_hdr_s *)cmd;
+ send_hdr->w0.total = m->pkt_len;
+ send_hdr->w0.aura = roc_npa_aura_handle_to_aura(m->pool->pool_id);
+
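+ /* A 16B send ext header follows the send header when required,
+ * pushing the SG subdescriptor two words further into the command.
+ */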
+ if (flags & NIX_TX_NEED_EXT_HDR)
+ off = 2;
+ else
+ off = 0;
+
+ sg = (union nix_send_sg_s *)&cmd[2 + off];
+ /* Clear sg->u header before use */
+ sg->u &= 0xFC00000000000000;
+ sg_u = sg->u;
+ slist = &cmd[3 + off];
+
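+ /* Each SG subdescriptor holds up to three segment sizes in sg->u
+ * (16 bits per segment) followed by up to three IOVA pointers in slist.
+ */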
+ i = 0;
+ nb_segs = m->nb_segs;
+
+ /* Fill mbuf segments */
+ do {
+ m_next = m->next;
+ sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
+ *slist = rte_mbuf_data_iova(m);
+ /* Set invert df if buffer is not to be freed by H/W */
+ if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
+ sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55));
+ /* Commit changes to mbuf */
+ rte_io_wmb();
+ }
+ /* Mark mempool object as "put" since it is freed by NIX */
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ if (!(sg_u & (1ULL << (i + 55))))
+ __mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+ rte_io_wmb();
+#endif
+ slist++;
+ i++;
+ nb_segs--;
+ if (i > 2 && nb_segs) {
+ i = 0;
+ /* Next SG subdesc */
+ *(uint64_t *)slist = sg_u & 0xFC00000000000000;
+ sg->u = sg_u;
+ sg->segs = 3;
+ sg = (union nix_send_sg_s *)slist;
+ sg_u = sg->u;
+ slist++;
+ }
+ m = m_next;
+ } while (nb_segs);
+
+ sg->u = sg_u;
+ sg->segs = i;
+ segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
+ /* Roundup extra dwords to multiple of 2 */
+ segdw = (segdw >> 1) + (segdw & 0x1);
+ /* Default dwords */
+ segdw += (off >> 1) + 1;
+ send_hdr->w0.sizem1 = segdw - 1;
+
+ return segdw;
+}
+
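+/* Copy the prepared command into the LMT line without submitting it */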
+static __rte_always_inline void
+cn9k_nix_xmit_mseg_prep_lmt(uint64_t *cmd, void *lmt_addr, uint16_t segdw)
+{
+ roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+}
+
+static __rte_always_inline void
+cn9k_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr, rte_iova_t io_addr,
+ uint16_t segdw)
+{
+ uint64_t lmt_status;
+
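+ /* Retry the LMTST until the LDEOR submission succeeds */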
+ do {
+ roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+ lmt_status = roc_lmt_submit_ldeor(io_addr);
+ } while (lmt_status == 0);
+}
+
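+/* As cn9k_nix_xmit_mseg_one(), but with a store barrier issued first */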
+static __rte_always_inline void
+cn9k_nix_xmit_mseg_one_release(uint64_t *cmd, void *lmt_addr,
+ rte_iova_t io_addr, uint16_t segdw)
+{
+ uint64_t lmt_status;
+
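+ /* Make prior mbuf/descriptor writes visible before the LMTST */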
+ rte_io_wmb();
+ do {
+ roc_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+ lmt_status = roc_lmt_submit_ldeor(io_addr);
+ } while (lmt_status == 0);
+}
+
static __rte_always_inline uint16_t
cn9k_nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts,
uint64_t *cmd, const uint16_t flags)
return pkts;
}
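+
+/* Multi-segment Tx burst: one LMTST submission per packet */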
+static __rte_always_inline uint16_t
+cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+{
+ struct cn9k_eth_txq *txq = tx_queue;
+ const rte_iova_t io_addr = txq->io_addr;
+ void *lmt_addr = txq->lmt_addr;
+ uint64_t lso_tun_fmt;
+ uint16_t segdw;
+ uint64_t i;
+
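+ /* Honour the Tx flow-control window before enqueuing */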
+ NIX_XMIT_FC_OR_RETURN(txq, pkts);
+
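+ /* Copy the per-queue command template (send hdr/ext) into cmd[] */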
+ roc_lmt_mov(cmd, &txq->cmd[0], cn9k_nix_tx_ext_subs(flags));
+
+ /* Perform header writes before barrier for TSO */
+ if (flags & NIX_TX_OFFLOAD_TSO_F) {
+ lso_tun_fmt = txq->lso_tun_fmt;
+
+ for (i = 0; i < pkts; i++)
+ cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags);
+ }
+
+ /* Commit any changes to the packets here, as no further changes are
+ * made unless no-fast-free (MBUF_NOFF) is enabled, in which case the
+ * barrier is issued after cnxk_nix_prefree_seg().
+ */
+ if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
+ rte_io_wmb();
+
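+ /* Prepare the send descriptor, build the SG list and submit each packet */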
+ for (i = 0; i < pkts; i++) {
+ cn9k_nix_xmit_prepare(tx_pkts[i], cmd, flags, lso_tun_fmt);
+ segdw = cn9k_nix_prepare_mseg(tx_pkts[i], cmd, flags);
+ cn9k_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
+ }
+
+ /* Reduce the cached count */
+ txq->fc_cache_pkts -= pkts;
+
+ return pkts;
+}
+
#define L3L4CSUM_F NIX_TX_OFFLOAD_L3_L4_CSUM_F
#define OL3OL4CSUM_F NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
#define VLAN_F NIX_TX_OFFLOAD_VLAN_QINQ_F
#define T(name, f4, f3, f2, f1, f0, sz, flags) \
uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_##name( \
- void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \
+ \
+ uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_mseg_##name( \
+ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts);
NIX_TX_FASTPATH_MODES