diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index 0edac8247d..ccd93c7b6b 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -56,6 +56,8 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 			RTE_PKTMBUF_HEADROOM);
 	__m128i dma_addr0, dma_addr1;
 
+	const __m128i hba_msk = _mm_set_epi64x(0, UINT64_MAX);
+
 	rxdp = rxq->rx_ring + rxq->rxrearm_start;
 
 	/* Pull 'n' more MBUFs into the software ring */
@@ -108,6 +110,10 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
 		dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room);
 		dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room);
 
+		/* set the Header Buffer Address to zero */
+		dma_addr0 = _mm_and_si128(dma_addr0, hba_msk);
+		dma_addr1 = _mm_and_si128(dma_addr1, hba_msk);
+
 		/* flush desc with pa dma_addr */
 		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr0);
 		_mm_store_si128((__m128i *)&rxdp++->read, dma_addr1);
@@ -134,14 +140,6 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq)
  */
#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE
 
-#define OLFLAGS_MASK	((uint16_t)(PKT_RX_VLAN_PKT | PKT_RX_IPV4_HDR |\
-			PKT_RX_IPV4_HDR_EXT | PKT_RX_IPV6_HDR |\
-			PKT_RX_IPV6_HDR_EXT))
-#define OLFLAGS_MASK_V	(((uint64_t)OLFLAGS_MASK << 48) | \
-			((uint64_t)OLFLAGS_MASK << 32) | \
-			((uint64_t)OLFLAGS_MASK << 16) | \
-			((uint64_t)OLFLAGS_MASK))
-#define PTYPE_SHIFT	(1)
 #define VTAG_SHIFT	(3)
 
 static inline void
@@ -153,19 +151,37 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 	uint64_t dword;
 	} vol;
 
+	/* pkt type + vlan olflags mask */
+	const __m128i pkttype_msk = _mm_set_epi16(
+			0x0000, 0x0000, 0x0000, 0x0000,
+			PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT);
+
+	/* mask everything except the RSS type */
+	const __m128i rsstype_msk = _mm_set_epi16(
+			0x0000, 0x0000, 0x0000, 0x0000,
+			0x000F, 0x000F, 0x000F, 0x000F);
+
+	/* map the RSS type to the RSS hash flag */
+	const __m128i rss_flags = _mm_set_epi8(PKT_RX_FDIR, 0, 0, 0,
+			0, 0, 0, PKT_RX_RSS_HASH,
+			PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0,
+			PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0);
+
 	ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]);
 	ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]);
 	vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]);
 	vtag1 = _mm_unpackhi_epi16(descs[2], descs[3]);
 
-	ptype1 = _mm_unpacklo_epi32(ptype0, ptype1);
-	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
+	ptype0 = _mm_unpacklo_epi32(ptype0, ptype1);
+	ptype0 = _mm_and_si128(ptype0, rsstype_msk);
+	ptype0 = _mm_shuffle_epi8(rss_flags, ptype0);
 
-	ptype1 = _mm_slli_epi16(ptype1, PTYPE_SHIFT);
+	vtag1 = _mm_unpacklo_epi32(vtag0, vtag1);
 	vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT);
+	vtag1 = _mm_and_si128(vtag1, pkttype_msk);
 
-	ptype1 = _mm_or_si128(ptype1, vtag1);
-	vol.dword = _mm_cvtsi128_si64(ptype1) & OLFLAGS_MASK_V;
+	vtag1 = _mm_or_si128(ptype0, vtag1);
+	vol.dword = _mm_cvtsi128_si64(vtag1);
 
 	rx_pkts[0]->ol_flags = vol.e[0];
 	rx_pkts[1]->ol_flags = vol.e[1];
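In the rewritten desc_to_olflags_v() above, _mm_shuffle_epi8 turns rss_flags into a 16-entry byte lookup table: the low four bits of each descriptor's packet-type word select one byte of the constant, so every RSS-type nibble maps to 0, PKT_RX_RSS_HASH, or PKT_RX_FDIR in a single instruction, while the VLAN-present bit is folded in separately through VTAG_SHIFT and pkttype_msk. A scalar sketch of the same per-packet computation follows; the helper is hypothetical (not part of the patch) and assumes the flag values fit in one byte, which holds for the three flags used here.

#include <stdint.h>
#include <rte_mbuf.h>	/* PKT_RX_RSS_HASH, PKT_RX_FDIR, PKT_RX_VLAN_PKT */

/* hypothetical scalar equivalent of desc_to_olflags_v() for one packet */
static inline uint64_t
olflags_scalar(uint16_t pkt_info, uint16_t staterr)
{
	/* byte i equals lane i of the rss_flags constant above
	 * (_mm_set_epi8 lists its arguments from lane 15 down to lane 0) */
	static const uint8_t rss_map[16] = {
		0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
		0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
		PKT_RX_RSS_HASH, 0, 0, 0,
		0, 0, 0, PKT_RX_FDIR,
	};
	uint64_t flags = rss_map[pkt_info & 0x0F];	/* rsstype_msk */

	/* VTAG_SHIFT (3) moves the VLAN-present bit down to bit 0, where
	 * pkttype_msk turns it into PKT_RX_VLAN_PKT */
	if ((staterr >> 3) & 1)
		flags |= PKT_RX_VLAN_PKT;
	return flags;
}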
@@ -177,13 +193,13 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 #endif
 
 /*
- * vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
+ * vPMD raw receive routine, only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
  *
  * Notice:
- * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
- * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packets
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
  *   numbers of DD bit
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
  * - don't support ol_flags for rss and csum err
  */
 static inline uint16_t
@@ -197,16 +213,21 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	uint64_t var;
 	__m128i shuf_msk;
 	__m128i crc_adjust = _mm_set_epi16(
-		0, 0, 0, 0, /* ignore non-length fields */
+		0, 0, 0,    /* ignore non-length fields */
+		-rxq->crc_len, /* sub crc on data_len */
 		0,          /* ignore high-16bits of pkt_len */
 		-rxq->crc_len, /* sub crc on pkt_len */
-		-rxq->crc_len, /* sub crc on data_len */
-		0            /* ignore pkt_type field */
+		0, 0        /* ignore pkt_type field */
 		);
 	__m128i dd_check, eop_check;
+	__m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF,
+					  0xFFFFFFFF, 0xFFFF07F0);
 
-	if (unlikely(nb_pkts < RTE_IXGBE_VPMD_RX_BURST))
-		return 0;
+	/* nb_pkts must not exceed RTE_IXGBE_MAX_RX_BURST */
+	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
+
+	/* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */
+	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP);
 
 	/* Just the act of getting into the function from the application is
 	 * going to cost about 7 cycles */
@@ -234,46 +255,41 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	/* mask to shuffle from desc. to mbuf */
 	shuf_msk = _mm_set_epi8(
 		7, 6, 5, 4,  /* octet 4~7, 32bits rss */
-		0xFF, 0xFF,  /* skip high 16 bits vlan_macip, zero out */
 		15, 14,      /* octet 14~15, low 16 bits vlan_macip */
+		13, 12,      /* octet 12~13, 16 bits data_len */
 		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
 		13, 12,      /* octet 12~13, low 16 bits pkt_len */
-		13, 12,      /* octet 12~13, 16 bits data_len */
-		0xFF, 0xFF   /* skip pkt_type field */
+		0xFF, 0xFF,  /* skip high 16 bits pkt_type */
+		1,           /* octet 1, 8 bits pkt_type field */
+		0            /* octet 0, 4 pkt_type bits at offset 4 (RSS type masked off) */
 		);
 
 	/* Cache is empty -> need to scan the buffer rings, but first move
 	 * the next 'n' mbufs into the cache */
 	sw_ring = &rxq->sw_ring[rxq->rx_tail];
 
-	/*
-	 * A. load 4 packet in one loop
+	/* A. load 4 packets in one loop
+	 * [A*. mask out the unused/dirty fields in each desc]
 	 * B. copy 4 mbuf point from swring to rx_pkts
 	 * C. calc the number of DD bits among the 4 packets
 	 * [C*. extract the end-of-packet bit, if requested]
 	 * D. fill info. from desc to mbuf
 	 */
-	for (pos = 0, nb_pkts_recd = 0; pos < RTE_IXGBE_VPMD_RX_BURST;
+	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
 			pos += RTE_IXGBE_DESCS_PER_LOOP,
 			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
+		__m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
 		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
 		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
 		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
 		__m128i mbp1, mbp2; /* two mbuf pointer in one XMM reg. */
 
-		if (split_packet) {
-			rte_prefetch0(&rx_pkts[pos]->cacheline1);
-			rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
-			rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
-			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
-		}
-
 		/* B.1 load 1 mbuf point */
 		mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
 
 		/* Read desc statuses backwards to avoid race condition */
 		/* A.1 load 4 pkts desc */
-		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+		descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
 
 		/* B.2 copy 2 mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
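The RTE_MIN/RTE_ALIGN_FLOOR pair above replaces the old all-or-nothing nb_pkts == RTE_IXGBE_VPMD_RX_BURST contract. A minimal sketch of the resulting clamping, with the driver's constants written out (RTE_IXGBE_MAX_RX_BURST is 32 and RTE_IXGBE_DESCS_PER_LOOP is 4; the helper name is hypothetical):

#include <stdint.h>
#include <rte_common.h>	/* RTE_MIN, RTE_ALIGN_FLOOR */

/* hypothetical helper mirroring the clamping in _recv_raw_pkts_vec() */
static inline uint16_t
clamp_rx_burst(uint16_t nb_pkts)
{
	nb_pkts = RTE_MIN(nb_pkts, (uint16_t)32);   /* scan one burst at most */
	return RTE_ALIGN_FLOOR(nb_pkts, 4);  /* whole 4-descriptor loops only */
}

A caller asking for 3 packets therefore gets 0, one asking for 30 gets 28, and one asking for 45 is capped at 32, matching the Notice items in the function comment.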
@@ -281,14 +297,29 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* B.1 load 1 mbuf point */
 		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
 
-		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+		descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
 		/* B.1 load 2 mbuf point */
-		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
-		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
+		descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+		descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
 
 		/* B.2 copy 2 mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
 
+		if (split_packet) {
+			rte_prefetch0(&rx_pkts[pos]->cacheline1);
+			rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
+			rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
+			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+		}
+
+		/* A* mask out bits 0~3 (RSS type) of pkts 3,4 */
+		descs[3] = _mm_and_si128(descs0[3], desc_mask);
+		descs[2] = _mm_and_si128(descs0[2], desc_mask);
+
+		/* A* mask out bits 0~3 (RSS type) of pkts 1,2 */
+		descs[1] = _mm_and_si128(descs0[1], desc_mask);
+		descs[0] = _mm_and_si128(descs0[0], desc_mask);
+
 		/* avoid compiler reorder optimization */
 		rte_compiler_barrier();
 
@@ -301,8 +332,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* C.1 4=>2 filter staterr info only */
 		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
 
-		/* set ol_flags with packet type and vlan tag */
-		desc_to_olflags_v(descs, &rx_pkts[pos]);
+		/* set ol_flags with the VLAN and RSS hash flags */
+		desc_to_olflags_v(descs0, &rx_pkts[pos]);
 
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
 		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
@@ -379,13 +410,13 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 }
 
 /*
- * vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
+ * vPMD receive routine, only accepts nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP
  *
  * Notice:
- * - nb_pkts < RTE_IXGBE_VPMD_RX_BURST, just return no packet
- * - nb_pkts > RTE_IXGBE_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packets
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
  *   numbers of DD bit
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
  * - don't support ol_flags for rss and csum err
  */
 uint16_t
@@ -399,12 +430,11 @@ static inline uint16_t
 reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 		uint16_t nb_bufs, uint8_t *split_flags)
 {
-	struct rte_mbuf *pkts[RTE_IXGBE_VPMD_RX_BURST]; /*finished pkts*/
+	struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/
 	struct rte_mbuf *start = rxq->pkt_first_seg;
 	struct rte_mbuf *end =  rxq->pkt_last_seg;
 	unsigned pkt_idx, buf_idx;
-
 	for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
 		if (end != NULL) {
 			/* processing a split packet */
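The next hunk patches the CRC-stripping corner case of this loop: when the Ethernet CRC is not wholly contained in the last segment, that mbuf is freed, and the patch adds the previously missing nb_segs decrement. A hedged sketch of the complete trim path (hypothetical helper, not driver code; it uses only the public mbuf fields this file already relies on):

#include <rte_mbuf.h>

/* hypothetical helper: strip crc_len trailing bytes from a completed chain */
static void
strip_crc_multiseg(struct rte_mbuf *start, struct rte_mbuf *end,
		uint16_t crc_len)
{
	if (end->data_len > crc_len) {
		/* common case: the CRC sits entirely in the last segment */
		end->data_len -= crc_len;
	} else {
		/* CRC spans the segment boundary: drop the last mbuf and
		 * trim the remainder off the new last segment */
		struct rte_mbuf *secondlast = start;

		while (secondlast->next != end)
			secondlast = secondlast->next;
		secondlast->data_len -= (crc_len - end->data_len);
		secondlast->next = NULL;
		rte_pktmbuf_free_seg(end);
		start->nb_segs--;	/* the fix this hunk adds */
	}
	start->pkt_len -= crc_len;
}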
@@ -426,6 +456,8 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 			else {
 				/* free up last mbuf */
 				struct rte_mbuf *secondlast = start;
+
+				start->nb_segs--;
 				while (secondlast->next != end)
 					secondlast = secondlast->next;
 				secondlast->data_len -= (rxq->crc_len -
@@ -462,14 +494,17 @@ reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs,
  *
  * Notice:
  * - don't support ol_flags for rss and csum err
- * - now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
+ * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packets
+ * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST
+ *   numbers of DD bit
+ * - nb_pkts is floor-aligned to a multiple of RTE_IXGBE_DESCS_PER_LOOP
  */
 uint16_t
 ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint16_t nb_pkts)
 {
 	struct ixgbe_rx_queue *rxq = rx_queue;
-	uint8_t split_flags[RTE_IXGBE_VPMD_RX_BURST] = {0};
+	uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0};
 
 	/* get some new buffers */
 	uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
@@ -478,10 +513,10 @@ ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
 		return 0;
 
 	/* happy day case, full burst + no packets to be joined */
-	const uint32_t *split_fl32 = (uint32_t *)split_flags;
+	const uint64_t *split_fl64 = (uint64_t *)split_flags;
 	if (rxq->pkt_first_seg == NULL &&
-			split_fl32[0] == 0 && split_fl32[1] == 0 &&
-			split_fl32[2] == 0 && split_fl32[3] == 0)
+			split_fl64[0] == 0 && split_fl64[1] == 0 &&
+			split_fl64[2] == 0 && split_fl64[3] == 0)
 		return nb_bufs;
 
 	/* reassemble any packets that need reassembly*/
@@ -537,8 +572,7 @@ ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq)
 	 * first buffer to free from S/W ring is at index
 	 * tx_next_dd - (tx_rs_thresh-1)
 	 */
-	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[txq->tx_next_dd -
-			(n - 1)];
+	txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)];
 	m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
 	if (likely(m != NULL)) {
 		free[0] = m;
@@ -595,8 +629,8 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint64_t rs = IXGBE_ADVTXD_DCMD_RS|DCMD_DTYP_FLAGS;
 	int i;
 
-	if (unlikely(nb_pkts > RTE_IXGBE_VPMD_TX_BURST))
-		nb_pkts = RTE_IXGBE_VPMD_TX_BURST;
+	/* crossing the tx_rs_thresh boundary is not allowed */
+	nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
 
 	if (txq->nb_tx_free < txq->tx_free_thresh)
 		ixgbe_tx_free_bufs(txq);
@@ -607,7 +641,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 	tx_id = txq->tx_tail;
 	txdp = &txq->tx_ring[tx_id];
-	txep = &((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id];
+	txep = &txq->sw_ring_v[tx_id];
 
 	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
 
@@ -628,7 +662,7 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		/* avoid reach the end of ring */
 		txdp = &(txq->tx_ring[tx_id]);
-		txep = &(((struct ixgbe_tx_entry_v *)txq->sw_ring)[tx_id]);
+		txep = &txq->sw_ring_v[tx_id];
 	}
 
 	tx_backlog_entry(txep, tx_pkts, nb_commit);
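One pattern above is worth spelling out: ixgbe_tx_free_bufs() recycles descriptors through __rte_pktmbuf_prefree_seg(), this era's internal helper that returns the mbuf only once no other reference holds it, and consecutive freed mbufs almost always come from one mempool, so they can be returned with a single rte_mempool_put_bulk() call. A hedged sketch of that batching pattern (hypothetical helper and batch size, not the driver's code):

#include <rte_mbuf.h>
#include <rte_mempool.h>

#define FREE_TBL_SZ 64	/* hypothetical batch size */

/* hypothetical helper: free n transmitted single-seg mbufs in batches */
static inline void
bulk_free_mbufs(struct rte_mbuf **mbufs, unsigned n)
{
	void *free_tbl[FREE_TBL_SZ];
	struct rte_mempool *pool = NULL;
	unsigned nb_free = 0, i;

	for (i = 0; i < n; i++) {
		/* returns NULL while other references keep the mbuf alive */
		struct rte_mbuf *m = __rte_pktmbuf_prefree_seg(mbufs[i]);

		if (m == NULL)
			continue;
		/* flush the batch when it is full or the mempool changes */
		if (nb_free == FREE_TBL_SZ ||
				(nb_free > 0 && m->pool != pool)) {
			rte_mempool_put_bulk(pool, free_tbl, nb_free);
			nb_free = 0;
		}
		pool = m->pool;
		free_tbl[nb_free++] = m;
	}
	if (nb_free > 0)
		rte_mempool_put_bulk(pool, free_tbl, nb_free);
}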
@@ -651,31 +685,49 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
 }
 
 static void __attribute__((cold))
-ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
+ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
 {
 	unsigned i;
 	struct ixgbe_tx_entry_v *txe;
-	uint16_t nb_free, max_desc;
+	const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1);
 
-	if (txq->sw_ring != NULL) {
-		/* release the used mbufs in sw_ring */
-		nb_free = txq->nb_tx_free;
-		max_desc = (uint16_t)(txq->nb_tx_desc - 1);
-		for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
-		     nb_free < max_desc && i != txq->tx_tail;
-		     i = (i + 1) & max_desc) {
-			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
-			if (txe->mbuf != NULL)
-				rte_pktmbuf_free_seg(txe->mbuf);
-		}
-		/* reset tx_entry */
-		for (i = 0; i < txq->nb_tx_desc; i++) {
-			txe = (struct ixgbe_tx_entry_v *)&txq->sw_ring[i];
-			txe->mbuf = NULL;
-		}
+	if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc)
+		return;
+
+	/* release the used mbufs in sw_ring */
+	for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1);
+	     i != txq->tx_tail;
+	     i = (i + 1) & max_desc) {
+		txe = &txq->sw_ring_v[i];
+		rte_pktmbuf_free_seg(txe->mbuf);
+	}
+	txq->nb_tx_free = max_desc;
+
+	/* reset tx_entry */
+	for (i = 0; i < txq->nb_tx_desc; i++) {
+		txe = &txq->sw_ring_v[i];
+		txe->mbuf = NULL;
 	}
 }
 
+void __attribute__((cold))
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
+{
+	const unsigned mask = rxq->nb_rx_desc - 1;
+	unsigned i;
+
+	if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc)
+		return;
+
+	/* free all mbufs that are valid in the ring */
+	for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask)
+		rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+	rxq->rxrearm_nb = rxq->nb_rx_desc;
+
+	/* set all entries to NULL */
+	memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc);
+}
+
 static void __attribute__((cold))
 ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 {
@@ -683,8 +735,8 @@ ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
 		return;
 
 	if (txq->sw_ring != NULL) {
-		rte_free((struct ixgbe_rx_entry *)txq->sw_ring - 1);
-		txq->sw_ring = NULL;
+		rte_free(txq->sw_ring_v - 1);
+		txq->sw_ring_v = NULL;
 	}
 }
 
@@ -692,7 +744,7 @@ static void __attribute__((cold))
 ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 {
 	static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
-	struct ixgbe_tx_entry_v *txe = (struct ixgbe_tx_entry_v *)txq->sw_ring;
+	struct ixgbe_tx_entry_v *txe = txq->sw_ring_v;
 	uint16_t i;
 
 	/* Zero out HW ring memory */
@@ -723,7 +775,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
 }
 
 static const struct ixgbe_txq_ops vec_txq_ops = {
-	.release_mbufs = ixgbe_tx_queue_release_mbufs,
+	.release_mbufs = ixgbe_tx_queue_release_mbufs_vec,
 	.free_swring = ixgbe_tx_free_swring,
 	.reset = ixgbe_reset_tx_queue,
 };
@@ -749,12 +801,11 @@ ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
 int __attribute__((cold))
 ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
 {
-	if (txq->sw_ring == NULL)
+	if (txq->sw_ring_v == NULL)
 		return -1;
 
 	/* leave the first one for overflow */
-	txq->sw_ring = (struct ixgbe_tx_entry *)
-		((struct ixgbe_tx_entry_v *)txq->sw_ring + 1);
+	txq->sw_ring_v = txq->sw_ring_v + 1;
 	txq->ops = &vec_txq_ops;
 
 	return 0;
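A closing note on the two release routines added above: both walk the ring with (i + 1) & mask rather than a modulo, which is valid because the descriptor ring sizes are powers of two, so nb_desc - 1 is an all-ones wrap mask. A generic, hedged illustration (hypothetical helper, not driver code):

#include <rte_mbuf.h>

/* hypothetical helper: free sw_ring entries in [first, last) with wrap */
static void
free_ring_range(struct rte_mbuf **ring, unsigned nb_desc,
		unsigned first, unsigned last)
{
	const unsigned mask = nb_desc - 1;	/* nb_desc must be 2^n */
	unsigned i;

	for (i = first; i != last; i = (i + 1) & mask)
		if (ring[i] != NULL)
			rte_pktmbuf_free_seg(ring[i]);
}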