From: Nithin Dabilpuram
Date: Sun, 2 Jun 2019 11:34:21 +0000 (+0530)
Subject: net/octeontx2: add Tx multi segment version
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=cbd5710db48d;p=dpdk.git

net/octeontx2: add Tx multi segment version

Add multi segment version of packet Transmit function.

Signed-off-by: Nithin Dabilpuram
Signed-off-by: Pavan Nikhilesh
---

diff --git a/drivers/net/octeontx2/otx2_ethdev.h b/drivers/net/octeontx2/otx2_ethdev.h
index 1f9323fe32..f39fdfa1fa 100644
--- a/drivers/net/octeontx2/otx2_ethdev.h
+++ b/drivers/net/octeontx2/otx2_ethdev.h
@@ -89,6 +89,10 @@
 #define NIX_TX_NB_SEG_MAX		9
 #endif
 
+#define NIX_TX_MSEG_SG_DWORDS				\
+	((RTE_ALIGN_MUL_CEIL(NIX_TX_NB_SEG_MAX, 3) / 3)	\
+	+ NIX_TX_NB_SEG_MAX)
+
 /* Apply BP when CQ is 75% full */
 #define NIX_CQ_BP_LEVEL (25 * 256 / 100)
 
diff --git a/drivers/net/octeontx2/otx2_tx.c b/drivers/net/octeontx2/otx2_tx.c
index 16d69b74fc..0ac5ea6527 100644
--- a/drivers/net/octeontx2/otx2_tx.c
+++ b/drivers/net/octeontx2/otx2_tx.c
@@ -49,6 +49,37 @@ nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return pkts;
 }
 
+static __rte_always_inline uint16_t
+nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
+		   uint16_t pkts, uint64_t *cmd, const uint16_t flags)
+{
+	struct otx2_eth_txq *txq = tx_queue; uint64_t i;
+	const rte_iova_t io_addr = txq->io_addr;
+	void *lmt_addr = txq->lmt_addr;
+	uint16_t segdw;
+
+	NIX_XMIT_FC_OR_RETURN(txq, pkts);
+
+	otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));
+
+	/* Lets commit any changes in the packet */
+	rte_cio_wmb();
+
+	for (i = 0; i < pkts; i++) {
+		otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
+		segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
+		otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
+					     tx_pkts[i]->ol_flags, segdw,
+					     flags);
+		otx2_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
+	}
+
+	/* Reduce the cached count */
+	txq->fc_cache_pkts -= pkts;
+
+	return pkts;
+}
+
 #define T(name, f4, f3, f2, f1, f0, sz, flags)				\
 static uint16_t __rte_noinline __hot					\
 otx2_nix_xmit_pkts_ ## name(void *tx_queue,				\
@@ -62,6 +93,20 @@ otx2_nix_xmit_pkts_ ## name(void *tx_queue,			\
 NIX_TX_FASTPATH_MODES
 #undef T
 
+#define T(name, f4, f3, f2, f1, f0, sz, flags)				\
+static uint16_t __rte_noinline __hot					\
+otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue,			\
+	struct rte_mbuf **tx_pkts, uint16_t pkts)			\
+{									\
+	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
+									\
+	return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd,	\
+				  (flags) | NIX_TX_MULTI_SEG_F);	\
+}
+
+NIX_TX_FASTPATH_MODES
+#undef T
+
 static inline void
 pick_tx_func(struct rte_eth_dev *eth_dev,
 	     const eth_tx_burst_t tx_burst[2][2][2][2][2])
@@ -80,15 +125,28 @@ pick_tx_func(struct rte_eth_dev *eth_dev,
 void
 otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
 {
+	struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
+
 	const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2] = {
 #define T(name, f4, f3, f2, f1, f0, sz, flags)				\
 	[f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_ ## name,
 
+NIX_TX_FASTPATH_MODES
+#undef T
+	};
+
+	const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2] = {
+#define T(name, f4, f3, f2, f1, f0, sz, flags)				\
+	[f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_mseg_ ## name,
+
 NIX_TX_FASTPATH_MODES
 #undef T
 	};
 
 	pick_tx_func(eth_dev, nix_eth_tx_burst);
 
+	if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
+		pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);
+
 	rte_mb();
 }
 
diff --git a/drivers/net/octeontx2/otx2_tx.h b/drivers/net/octeontx2/otx2_tx.h
index db4c1f70f0..b75a220ea4 100644
--- a/drivers/net/octeontx2/otx2_tx.h
+++ b/drivers/net/octeontx2/otx2_tx.h
@@ -212,6 +212,87 @@ otx2_nix_xmit_one(uint64_t *cmd, void *lmt_addr,
 	} while (lmt_status == 0);
 }
 
+static __rte_always_inline uint16_t
+otx2_nix_prepare_mseg(struct rte_mbuf *m, uint64_t *cmd, const uint16_t flags)
+{
+	struct nix_send_hdr_s *send_hdr;
+	union nix_send_sg_s *sg;
+	struct rte_mbuf *m_next;
+	uint64_t *slist, sg_u;
+	uint64_t nb_segs;
+	uint64_t segdw;
+	uint8_t off, i;
+
+	send_hdr = (struct nix_send_hdr_s *)cmd;
+	send_hdr->w0.total = m->pkt_len;
+	send_hdr->w0.aura = npa_lf_aura_handle_to_aura(m->pool->pool_id);
+
+	if (flags & NIX_TX_NEED_EXT_HDR)
+		off = 2;
+	else
+		off = 0;
+
+	sg = (union nix_send_sg_s *)&cmd[2 + off];
+	sg_u = sg->u;
+	slist = &cmd[3 + off];
+
+	i = 0;
+	nb_segs = m->nb_segs;
+
+	/* Fill mbuf segments */
+	do {
+		m_next = m->next;
+		sg_u = sg_u | ((uint64_t)m->data_len << (i << 4));
+		*slist = rte_mbuf_data_iova(m);
+		/* Set invert df if reference count > 1 */
+		if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)
+			sg_u |=
+			((uint64_t)(rte_pktmbuf_prefree_seg(m) == NULL) <<
+			 (i + 55));
+		/* Mark mempool object as "put" since it is freed by NIX */
+		if (!(sg_u & (1ULL << (i + 55)))) {
+			m->next = NULL;
+			__mempool_check_cookies(m->pool, (void **)&m, 1, 0);
+		}
+		slist++;
+		i++;
+		nb_segs--;
+		if (i > 2 && nb_segs) {
+			i = 0;
+			/* Next SG subdesc */
+			*(uint64_t *)slist = sg_u & 0xFC00000000000000;
+			sg->u = sg_u;
+			sg->segs = 3;
+			sg = (union nix_send_sg_s *)slist;
+			sg_u = sg->u;
+			slist++;
+		}
+		m = m_next;
+	} while (nb_segs);
+
+	sg->u = sg_u;
+	sg->segs = i;
+	segdw = (uint64_t *)slist - (uint64_t *)&cmd[2 + off];
+	/* Roundup extra dwords to multiple of 2 */
+	segdw = (segdw >> 1) + (segdw & 0x1);
+	/* Default dwords */
+	segdw += (off >> 1) + 1 + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F);
+	send_hdr->w0.sizem1 = segdw - 1;
+
+	return segdw;
+}
+
+static __rte_always_inline void
+otx2_nix_xmit_mseg_one(uint64_t *cmd, void *lmt_addr,
+		       rte_iova_t io_addr, uint16_t segdw)
+{
+	uint64_t lmt_status;
+
+	do {
+		otx2_lmt_mov_seg(lmt_addr, (const void *)cmd, segdw);
+		lmt_status = otx2_lmt_submit(io_addr);
+	} while (lmt_status == 0);
+}
 #define L3L4CSUM_F	NIX_TX_OFFLOAD_L3_L4_CSUM_F
 #define OL3OL4CSUM_F	NIX_TX_OFFLOAD_OL3_OL4_CSUM_F
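
For readers checking the dword budget in the patch, the standalone sketch below (not part of the commit) reproduces the same arithmetic. It assumes no extended header (off = 0) and no timestamp word, rewrites RTE_ALIGN_MUL_CEIL(x, 3) / 3 as (x + 2) / 3 so it compiles without DPDK headers, and the helper name sg_dword_pairs() is invented here for illustration only.

#include <stdio.h>
#include <stdint.h>

#define NIX_TX_NB_SEG_MAX 9

/*
 * Same value as the patch's NIX_TX_MSEG_SG_DWORDS: one SG header dword
 * per group of up to three segments, plus one pointer dword per segment.
 */
#define NIX_TX_MSEG_SG_DWORDS \
	(((NIX_TX_NB_SEG_MAX + 2) / 3) + NIX_TX_NB_SEG_MAX)

/*
 * Dword pairs (16-byte units) a command takes for nb_segs segments,
 * assuming no extended header and no timestamp, mirroring the segdw
 * computation in otx2_nix_prepare_mseg(): SG headers plus iova pointers,
 * rounded up to a multiple of two dwords, plus the send header pair.
 */
static uint16_t
sg_dword_pairs(uint16_t nb_segs)
{
	uint16_t segdw = (nb_segs + 2) / 3 + nb_segs;	/* SG hdrs + iovas */

	segdw = (segdw >> 1) + (segdw & 0x1);		/* round up to pairs */
	return segdw + 1;				/* + send header pair */
}

int
main(void)
{
	uint16_t n;

	printf("NIX_TX_MSEG_SG_DWORDS = %d dwords\n", NIX_TX_MSEG_SG_DWORDS);
	for (n = 1; n <= NIX_TX_NB_SEG_MAX; n++)
		printf("nb_segs = %u -> sizem1 + 1 = %u\n",
		       (unsigned int)n, (unsigned int)sg_dword_pairs(n));
	return 0;
}

With NIX_TX_NB_SEG_MAX = 9 this prints NIX_TX_MSEG_SG_DWORDS = 12, which appears to be why each mseg burst function reserves cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]: the per-mode sz already counts the single-segment SG header and pointer, so only the extra dwords are added on top.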