X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fcnxk%2Fcn9k_tx.h;h=ed65cd351ff2d65ea35a5855a8c199e3bf9196cb;hb=a9a238e9f57dd4363535bfb78c0c5c762522014a;hp=cb574a1c1d93bb533b1a9d91ab00906b0b7885e7;hpb=99fc1f6eefa7850089d392bc682767e75bc79259;p=dpdk.git diff --git a/drivers/net/cnxk/cn9k_tx.h b/drivers/net/cnxk/cn9k_tx.h index cb574a1c1d..ed65cd351f 100644 --- a/drivers/net/cnxk/cn9k_tx.h +++ b/drivers/net/cnxk/cn9k_tx.h @@ -545,7 +545,275 @@ cn9k_nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts, #if defined(RTE_ARCH_ARM64) +static __rte_always_inline void +cn9k_nix_prepare_tso(struct rte_mbuf *m, union nix_send_hdr_w1_u *w1, + union nix_send_ext_w0_u *w0, uint64_t ol_flags, + uint64_t flags) +{ + uint16_t lso_sb; + uint64_t mask; + + if (!(ol_flags & PKT_TX_TCP_SEG)) + return; + + mask = -(!w1->il3type); + lso_sb = (mask & w1->ol4ptr) + (~mask & w1->il4ptr) + m->l4_len; + + w0->u |= BIT(14); + w0->lso_sb = lso_sb; + w0->lso_mps = m->tso_segsz; + w0->lso_format = NIX_LSO_FORMAT_IDX_TSOV4 + !!(ol_flags & PKT_TX_IPV6); + w1->ol4type = NIX_SENDL4TYPE_TCP_CKSUM; + + /* Handle tunnel tso */ + if ((flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F) && + (ol_flags & PKT_TX_TUNNEL_MASK)) { + const uint8_t is_udp_tun = + (CNXK_NIX_UDP_TUN_BITMASK >> + ((ol_flags & PKT_TX_TUNNEL_MASK) >> 45)) & + 0x1; + + w1->il4type = NIX_SENDL4TYPE_TCP_CKSUM; + w1->ol4type = is_udp_tun ? NIX_SENDL4TYPE_UDP_CKSUM : 0; + /* Update format for UDP tunneled packet */ + w0->lso_format += is_udp_tun ? 2 : 6; + + w0->lso_format += !!(ol_flags & PKT_TX_OUTER_IPV6) << 1; + } +} + +static __rte_always_inline uint8_t +cn9k_nix_prepare_mseg_vec_list(struct rte_mbuf *m, uint64_t *cmd, + union nix_send_hdr_w0_u *sh, + union nix_send_sg_s *sg, const uint32_t flags) +{ + struct rte_mbuf *m_next; + uint64_t *slist, sg_u; + uint16_t nb_segs; + uint64_t segdw; + int i = 1; + + sh->total = m->pkt_len; + /* Clear sg->u header before use */ + sg->u &= 0xFC00000000000000; + sg_u = sg->u; + slist = &cmd[0]; + + sg_u = sg_u | ((uint64_t)m->data_len); + + nb_segs = m->nb_segs - 1; + m_next = m->next; + + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) + sg_u |= (cnxk_nix_prefree_seg(m) << 55); + /* Mark mempool object as "put" since it is freed by NIX */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (!(sg_u & (1ULL << 55))) + __mempool_check_cookies(m->pool, (void **)&m, 1, 0); + rte_io_wmb(); +#endif + + m = m_next; + /* Fill mbuf segments */ + do { + m_next = m->next; + sg_u = sg_u | ((uint64_t)m->data_len << (i << 4)); + *slist = rte_mbuf_data_iova(m); + /* Set invert df if buffer is not to be freed by H/W */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) + sg_u |= (cnxk_nix_prefree_seg(m) << (i + 55)); + /* Mark mempool object as "put" since it is freed by NIX + */ +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + if (!(sg_u & (1ULL << (i + 55)))) + __mempool_check_cookies(m->pool, (void **)&m, 1, 0); + rte_io_wmb(); +#endif + slist++; + i++; + nb_segs--; + if (i > 2 && nb_segs) { + i = 0; + /* Next SG subdesc */ + *(uint64_t *)slist = sg_u & 0xFC00000000000000; + sg->u = sg_u; + sg->segs = 3; + sg = (union nix_send_sg_s *)slist; + sg_u = sg->u; + slist++; + } + m = m_next; + } while (nb_segs); + + sg->u = sg_u; + sg->segs = i; + segdw = (uint64_t *)slist - (uint64_t *)&cmd[0]; + + segdw += 2; + /* Roundup extra dwords to multiple of 2 */ + segdw = (segdw >> 1) + (segdw & 0x1); + /* Default dwords */ + segdw += 1 + !!(flags & NIX_TX_NEED_EXT_HDR) + + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + 
sh->sizem1 = segdw - 1; + + return segdw; +} + +static __rte_always_inline uint8_t +cn9k_nix_prepare_mseg_vec(struct rte_mbuf *m, uint64_t *cmd, uint64x2_t *cmd0, + uint64x2_t *cmd1, const uint32_t flags) +{ + union nix_send_hdr_w0_u sh; + union nix_send_sg_s sg; + uint8_t ret; + + if (m->nb_segs == 1) { + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + sg.u = vgetq_lane_u64(cmd1[0], 0); + sg.u |= (cnxk_nix_prefree_seg(m) << 55); + cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0); + } + +#ifdef RTE_LIBRTE_MEMPOOL_DEBUG + sg.u = vgetq_lane_u64(cmd1[0], 0); + if (!(sg.u & (1ULL << 55))) + __mempool_check_cookies(m->pool, (void **)&m, 1, 0); + rte_io_wmb(); +#endif + return 2 + !!(flags & NIX_TX_NEED_EXT_HDR) + + !!(flags & NIX_TX_OFFLOAD_TSTAMP_F); + } + + sh.u = vgetq_lane_u64(cmd0[0], 0); + sg.u = vgetq_lane_u64(cmd1[0], 0); + + ret = cn9k_nix_prepare_mseg_vec_list(m, cmd, &sh, &sg, flags); + + cmd0[0] = vsetq_lane_u64(sh.u, cmd0[0], 0); + cmd1[0] = vsetq_lane_u64(sg.u, cmd1[0], 0); + return ret; +} + #define NIX_DESCS_PER_LOOP 4 + +static __rte_always_inline void +cn9k_nix_xmit_pkts_mseg_vector(uint64x2_t *cmd0, uint64x2_t *cmd1, + uint64x2_t *cmd2, uint64x2_t *cmd3, + uint8_t *segdw, + uint64_t slist[][CNXK_NIX_TX_MSEG_SG_DWORDS - 2], + uint64_t *lmt_addr, rte_iova_t io_addr, + const uint32_t flags) +{ + uint64_t lmt_status; + uint8_t j, off; + + if (!(flags & NIX_TX_NEED_EXT_HDR) && + !(flags & NIX_TX_OFFLOAD_TSTAMP_F)) { + /* No segments in 4 consecutive packets. */ + if ((segdw[0] + segdw[1] + segdw[2] + segdw[3]) <= 8) { + do { + vst1q_u64(lmt_addr, cmd0[0]); + vst1q_u64(lmt_addr + 2, cmd1[0]); + vst1q_u64(lmt_addr + 4, cmd0[1]); + vst1q_u64(lmt_addr + 6, cmd1[1]); + vst1q_u64(lmt_addr + 8, cmd0[2]); + vst1q_u64(lmt_addr + 10, cmd1[2]); + vst1q_u64(lmt_addr + 12, cmd0[3]); + vst1q_u64(lmt_addr + 14, cmd1[3]); + lmt_status = roc_lmt_submit_ldeor(io_addr); + } while (lmt_status == 0); + + return; + } + } + + for (j = 0; j < NIX_DESCS_PER_LOOP;) { + /* Fit consecutive packets in same LMTLINE. 
*/ + if ((segdw[j] + segdw[j + 1]) <= 8) { +again0: + if ((flags & NIX_TX_NEED_EXT_HDR) && + (flags & NIX_TX_OFFLOAD_TSTAMP_F)) { + vst1q_u64(lmt_addr, cmd0[j]); + vst1q_u64(lmt_addr + 2, cmd2[j]); + vst1q_u64(lmt_addr + 4, cmd1[j]); + /* Copy segs */ + off = segdw[j] - 4; + roc_lmt_mov_seg(lmt_addr + 6, slist[j], off); + off <<= 1; + vst1q_u64(lmt_addr + 6 + off, cmd3[j]); + + vst1q_u64(lmt_addr + 8 + off, cmd0[j + 1]); + vst1q_u64(lmt_addr + 10 + off, cmd2[j + 1]); + vst1q_u64(lmt_addr + 12 + off, cmd1[j + 1]); + roc_lmt_mov_seg(lmt_addr + 14 + off, + slist[j + 1], segdw[j + 1] - 4); + off += ((segdw[j + 1] - 4) << 1); + vst1q_u64(lmt_addr + 14 + off, cmd3[j + 1]); + } else if (flags & NIX_TX_NEED_EXT_HDR) { + vst1q_u64(lmt_addr, cmd0[j]); + vst1q_u64(lmt_addr + 2, cmd2[j]); + vst1q_u64(lmt_addr + 4, cmd1[j]); + /* Copy segs */ + off = segdw[j] - 3; + roc_lmt_mov_seg(lmt_addr + 6, slist[j], off); + off <<= 1; + vst1q_u64(lmt_addr + 6 + off, cmd0[j + 1]); + vst1q_u64(lmt_addr + 8 + off, cmd2[j + 1]); + vst1q_u64(lmt_addr + 10 + off, cmd1[j + 1]); + roc_lmt_mov_seg(lmt_addr + 12 + off, + slist[j + 1], segdw[j + 1] - 3); + } else { + vst1q_u64(lmt_addr, cmd0[j]); + vst1q_u64(lmt_addr + 2, cmd1[j]); + /* Copy segs */ + off = segdw[j] - 2; + roc_lmt_mov_seg(lmt_addr + 4, slist[j], off); + off <<= 1; + vst1q_u64(lmt_addr + 4 + off, cmd0[j + 1]); + vst1q_u64(lmt_addr + 6 + off, cmd1[j + 1]); + roc_lmt_mov_seg(lmt_addr + 8 + off, + slist[j + 1], segdw[j + 1] - 2); + } + lmt_status = roc_lmt_submit_ldeor(io_addr); + if (lmt_status == 0) + goto again0; + j += 2; + } else { +again1: + if ((flags & NIX_TX_NEED_EXT_HDR) && + (flags & NIX_TX_OFFLOAD_TSTAMP_F)) { + vst1q_u64(lmt_addr, cmd0[j]); + vst1q_u64(lmt_addr + 2, cmd2[j]); + vst1q_u64(lmt_addr + 4, cmd1[j]); + /* Copy segs */ + off = segdw[j] - 4; + roc_lmt_mov_seg(lmt_addr + 6, slist[j], off); + off <<= 1; + vst1q_u64(lmt_addr + 6 + off, cmd3[j]); + } else if (flags & NIX_TX_NEED_EXT_HDR) { + vst1q_u64(lmt_addr, cmd0[j]); + vst1q_u64(lmt_addr + 2, cmd2[j]); + vst1q_u64(lmt_addr + 4, cmd1[j]); + /* Copy segs */ + off = segdw[j] - 3; + roc_lmt_mov_seg(lmt_addr + 6, slist[j], off); + } else { + vst1q_u64(lmt_addr, cmd0[j]); + vst1q_u64(lmt_addr + 2, cmd1[j]); + /* Copy segs */ + off = segdw[j] - 2; + roc_lmt_mov_seg(lmt_addr + 4, slist[j], off); + } + lmt_status = roc_lmt_submit_ldeor(io_addr); + if (lmt_status == 0) + goto again1; + j += 1; + } + } +} + static __rte_always_inline uint16_t cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts, uint64_t *cmd, const uint16_t flags) @@ -580,6 +848,12 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, /* Reduce the cached count */ txq->fc_cache_pkts -= pkts; + /* Perform header writes before barrier for TSO */ + if (flags & NIX_TX_OFFLOAD_TSO_F) { + for (i = 0; i < pkts; i++) + cn9k_nix_xmit_prepare_tso(tx_pkts[i], flags); + } + /* Lets commit any changes in the packet here as no further changes * to the packet will be done unless no fast free is enabled. */ @@ -637,6 +911,13 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, sendmem23_w1 = sendmem01_w1; } + if (flags & NIX_TX_OFFLOAD_TSO_F) { + /* Clear the LSO enable bit. 
*/ + sendext01_w0 = vbicq_u64(sendext01_w0, + vdupq_n_u64(BIT_ULL(14))); + sendext23_w0 = sendext01_w0; + } + /* Move mbufs to iova */ mbuf0 = (uint64_t *)tx_pkts[0]; mbuf1 = (uint64_t *)tx_pkts[1]; @@ -1286,7 +1567,52 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, cmd3[3] = vzip2q_u64(sendmem23_w0, sendmem23_w1); } - if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) { + if (flags & NIX_TX_OFFLOAD_TSO_F) { + uint64_t sx_w0[NIX_DESCS_PER_LOOP]; + uint64_t sd_w1[NIX_DESCS_PER_LOOP]; + + /* Extract SD W1 as we need to set L4 types. */ + vst1q_u64(sd_w1, senddesc01_w1); + vst1q_u64(sd_w1 + 2, senddesc23_w1); + + /* Extract SX W0 as we need to set LSO fields. */ + vst1q_u64(sx_w0, sendext01_w0); + vst1q_u64(sx_w0 + 2, sendext23_w0); + + /* Extract ol_flags. */ + xtmp128 = vzip1q_u64(len_olflags0, len_olflags1); + ytmp128 = vzip1q_u64(len_olflags2, len_olflags3); + + /* Prepare individual mbufs. */ + cn9k_nix_prepare_tso(tx_pkts[0], + (union nix_send_hdr_w1_u *)&sd_w1[0], + (union nix_send_ext_w0_u *)&sx_w0[0], + vgetq_lane_u64(xtmp128, 0), flags); + + cn9k_nix_prepare_tso(tx_pkts[1], + (union nix_send_hdr_w1_u *)&sd_w1[1], + (union nix_send_ext_w0_u *)&sx_w0[1], + vgetq_lane_u64(xtmp128, 1), flags); + + cn9k_nix_prepare_tso(tx_pkts[2], + (union nix_send_hdr_w1_u *)&sd_w1[2], + (union nix_send_ext_w0_u *)&sx_w0[2], + vgetq_lane_u64(ytmp128, 0), flags); + + cn9k_nix_prepare_tso(tx_pkts[3], + (union nix_send_hdr_w1_u *)&sd_w1[3], + (union nix_send_ext_w0_u *)&sx_w0[3], + vgetq_lane_u64(ytmp128, 1), flags); + + senddesc01_w1 = vld1q_u64(sd_w1); + senddesc23_w1 = vld1q_u64(sd_w1 + 2); + + sendext01_w0 = vld1q_u64(sx_w0); + sendext23_w0 = vld1q_u64(sx_w0 + 2); + } + + if ((flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) && + !(flags & NIX_TX_MULTI_SEG_F)) { /* Set don't free bit if reference count > 1 */ xmask01 = vdupq_n_u64(0); xmask23 = xmask01; @@ -1330,7 +1656,7 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, * cnxk_nix_prefree_seg are written before LMTST. */ rte_io_wmb(); - } else { + } else if (!(flags & NIX_TX_MULTI_SEG_F)) { /* Move mbufs to iova */ mbuf0 = (uint64_t *)tx_pkts[0]; mbuf1 = (uint64_t *)tx_pkts[1]; @@ -1378,7 +1704,27 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, cmd2[3] = vzip2q_u64(sendext23_w0, sendext23_w1); } - if (flags & NIX_TX_NEED_EXT_HDR) { + if (flags & NIX_TX_MULTI_SEG_F) { + uint64_t seg_list[NIX_DESCS_PER_LOOP] + [CNXK_NIX_TX_MSEG_SG_DWORDS - 2]; + uint8_t j, segdw[NIX_DESCS_PER_LOOP + 1]; + + /* Build mseg list for each packet individually. */ + for (j = 0; j < NIX_DESCS_PER_LOOP; j++) + segdw[j] = cn9k_nix_prepare_mseg_vec(tx_pkts[j], + seg_list[j], &cmd0[j], + &cmd1[j], flags); + segdw[4] = 8; + + /* Commit all changes to mbuf before LMTST. */ + if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) + rte_io_wmb(); + + cn9k_nix_xmit_pkts_mseg_vector(cmd0, cmd1, cmd2, cmd3, + segdw, seg_list, + lmt_addr, io_addr, + flags); + } else if (flags & NIX_TX_NEED_EXT_HDR) { /* With ext header in the command we can no longer send * all 4 packets together since LMTLINE is 128bytes. * Split and Tx twice. 
@@ -1440,9 +1786,14 @@ cn9k_nix_xmit_pkts_vector(void *tx_queue, struct rte_mbuf **tx_pkts, tx_pkts = tx_pkts + NIX_DESCS_PER_LOOP; } - if (unlikely(pkts_left)) - pkts += cn9k_nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, cmd, - flags); + if (unlikely(pkts_left)) { + if (flags & NIX_TX_MULTI_SEG_F) + pkts += cn9k_nix_xmit_pkts_mseg(tx_queue, tx_pkts, + pkts_left, cmd, flags); + else + pkts += cn9k_nix_xmit_pkts(tx_queue, tx_pkts, pkts_left, + cmd, flags); + } return pkts; } @@ -1607,6 +1958,9 @@ T(ts_tso_noff_vlan_ol3ol4csum_l3l4csum, 1, 1, 1, 1, 1, 1, 8, \ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \ \ uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_vec_##name( \ + void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); \ + \ + uint16_t __rte_noinline __rte_hot cn9k_nix_xmit_pkts_vec_mseg_##name( \ void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t pkts); NIX_TX_FASTPATH_MODES
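
Note on the LMTLINE packing used by cn9k_nix_xmit_pkts_mseg_vector() above: a
CN9K LMTLINE is 128 bytes, i.e. 8 descriptor "dwords" of 16 bytes (two 64-bit
words) each, which is why two multi-seg descriptors are burst together only
when segdw[j] + segdw[j + 1] <= 8, and why the no-ext-header/no-timestamp path
can submit four 2-dword packets in a single LMTST. The snippet below is a
minimal standalone sketch of that size check, not part of the patch; the names
LMT_LINE_BYTES, NIX_DWORD_BYTES and lmt_can_pack_two() are illustrative only.

#include <stdbool.h>
#include <stdint.h>

#define LMT_LINE_BYTES	128	/* one CN9K LMTLINE */
#define NIX_DWORD_BYTES	16	/* one descriptor "dword" = 2 x 64-bit words */

/* True when two descriptors, sized in dwords, fit in a single LMTLINE. */
static inline bool
lmt_can_pack_two(uint8_t segdw_a, uint8_t segdw_b)
{
	return (uint32_t)(segdw_a + segdw_b) * NIX_DWORD_BYTES <=
	       LMT_LINE_BYTES;
}

For example, two single-seg packets without the ext header or timestamp cost
2 dwords each, so lmt_can_pack_two(2, 2) holds, and four such packets
(8 dwords) exactly fill one LMTLINE.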