mbuf: add prefetch helpers
diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c
index aa7f52a..eef80d9 100644
--- a/drivers/net/i40e/i40e_rxtx_vec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec.c
@@ -184,45 +184,9 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 #define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
 #endif
 
-#define PKTLEN_SHIFT     (6)
-#define PKTLEN_MASK      (0x3FFF)
-/* Handling the pkt len field is not aligned with 1byte, so shift is
- * needed to let it align
- */
-static inline void
-desc_pktlen_align(__m128i descs[4])
-{
-       __m128i pktlen0, pktlen1, zero;
-       union {
-               uint16_t e[4];
-               uint64_t dword;
-       } vol;
-
-       /* mask everything except pktlen field*/
-       const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK,
-                                               PKTLEN_MASK, PKTLEN_MASK);
+#define PKTLEN_SHIFT     10
 
-       pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]);
-       pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]);
-       pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1);
-
-       zero = _mm_xor_si128(pktlen0, pktlen0);
-
-       pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT);
-       pktlen0 = _mm_and_si128(pktlen0, pktlen_msk);
-
-       pktlen0 = _mm_packs_epi32(pktlen0, zero);
-       vol.dword = _mm_cvtsi128_si64(pktlen0);
-       /* let the descriptor byte 15-14 store the pkt len */
-       *((uint16_t *)&descs[0]+7) = vol.e[0];
-       *((uint16_t *)&descs[1]+7) = vol.e[1];
-       *((uint16_t *)&descs[2]+7) = vol.e[2];
-       *((uint16_t *)&descs[3]+7) = vol.e[3];
-}
-
- /* vPMD receive routine, now only accept (nb_pkts == RTE_I40E_VPMD_RX_BURST)
- * in one loop
- *
+ /*
  * Notice:
  * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
  * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
@@ -333,18 +297,23 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
 
                if (split_packet) {
-                       rte_prefetch0(&rx_pkts[pos]->cacheline1);
-                       rte_prefetch0(&rx_pkts[pos + 1]->cacheline1);
-                       rte_prefetch0(&rx_pkts[pos + 2]->cacheline1);
-                       rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos]);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
+                       rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
                }
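
For reference, the prefetch helpers named in this patch come from the
rte_mbuf.h side of the same series; they are small wrappers along these
lines (a sketch, not part of this file):

	static inline void
	rte_mbuf_prefetch_part1(struct rte_mbuf *m)
	{
		rte_prefetch0(&m->cacheline0);
	}

	static inline void
	rte_mbuf_prefetch_part2(struct rte_mbuf *m)
	{
	#if RTE_CACHE_LINE_SIZE == 64
		rte_prefetch0(&m->cacheline1);
	#else
		RTE_SET_USED(m);
	#endif
	}

This keeps drivers from reaching into the cacheline0/cacheline1 marker
fields directly, as the replaced lines above did.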
 
-               /*shift the pktlen field*/
-               desc_pktlen_align(descs);
-
                /* avoid compiler reorder optimization */
                rte_compiler_barrier();
 
+               /* pkts 3,4: shift the pktlen field into 16-bit alignment */
+               const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT);
+               const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT);
+
+               /* merge the now-aligned packet length fields back in */
+               descs[3] = _mm_blend_epi16(descs[3], len3, 0x80);
+               descs[2] = _mm_blend_epi16(descs[2], len2, 0x80);
+
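
As a scalar model of the shift-and-blend above (hi_dw is an assumed
stand-in for the high 32 bits of descriptor qword 1; the bits above the
14-bit length are zero on this path, since the vector setup check
rejects header split):

	uint32_t hi_dw = 0xABC0;                  /* pkt_len 0x2AF at bits [19:6] */
	uint16_t len_old = (hi_dw >> 6) & 0x3FFF; /* removed shift+mask approach  */
	uint32_t shifted = hi_dw << PKTLEN_SHIFT; /* PKTLEN_SHIFT == 10           */
	uint16_t len_new = shifted >> 16;         /* length now sits in word 7,   */
	                                          /* where blend mask 0x80 and    */
	                                          /* shuf_msk expect it           */
	/* len_old == len_new == 0x2AF */

Because the blend replaces only word 7 of each descriptor lane, the
status and error bits lower in the qword are left intact.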
                /* D.1 pkt 3,4 convert format from desc to pktmbuf */
                pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
                pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
@@ -360,6 +329,14 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
                pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
                pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
 
+               /* pkts 1,2: shift the pktlen field into 16-bit alignment */
+               const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT);
+               const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT);
+
+               /* merge the now-aligned packet length fields back in */
+               descs[1] = _mm_blend_epi16(descs[1], len1, 0x80);
+               descs[0] = _mm_blend_epi16(descs[0], len0, 0x80);
+
                /* D.1 pkt 1,2 convert format from desc to pktmbuf */
                pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
                pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
@@ -430,13 +407,11 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
        return nb_pkts_recd;
 }
 
- /* vPMD receive routine, now only accept (nb_pkts == RTE_IXGBE_VPMD_RX_BURST)
- * in one loop
- *
+ /*
  * Notice:
- * - nb_pkts < RTE_I40E_VPMD_RX_BURST, just return no packet
- * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_IXGBE_VPMD_RX_BURST
- *   numbers of DD bit
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   numbers of DD bits
  */
 uint16_t
 i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
@@ -445,6 +420,267 @@ i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
 }
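
Since the vPMD routine scans at most RTE_I40E_VPMD_RX_BURST descriptors
per call, a caller that wants larger bursts loops over it; a minimal
sketch (recv_pkts_vec_loop is a hypothetical wrapper, not part of this
patch):

	static uint16_t
	recv_pkts_vec_loop(void *rx_queue, struct rte_mbuf **rx_pkts,
			   uint16_t nb_pkts)
	{
		uint16_t retval = 0;

		while (nb_pkts > RTE_I40E_VPMD_RX_BURST) {
			uint16_t burst;

			burst = i40e_recv_pkts_vec(rx_queue, rx_pkts + retval,
						   RTE_I40E_VPMD_RX_BURST);
			retval += burst;
			nb_pkts -= burst;
			if (burst < RTE_I40E_VPMD_RX_BURST)
				return retval;
		}

		return retval + i40e_recv_pkts_vec(rx_queue,
						   rx_pkts + retval, nb_pkts);
	}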
 
+static inline uint16_t
+reassemble_packets(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_bufs,
+                  uint16_t nb_bufs, uint8_t *split_flags)
+{
+       struct rte_mbuf *pkts[RTE_I40E_VPMD_RX_BURST]; /* finished pkts */
+       struct rte_mbuf *start = rxq->pkt_first_seg;
+       struct rte_mbuf *end = rxq->pkt_last_seg;
+       unsigned pkt_idx, buf_idx;
+
+       for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) {
+               if (end != NULL) {
+                       /* processing a split packet */
+                       end->next = rx_bufs[buf_idx];
+                       rx_bufs[buf_idx]->data_len += rxq->crc_len;
+
+                       start->nb_segs++;
+                       start->pkt_len += rx_bufs[buf_idx]->data_len;
+                       end = end->next;
+
+                       if (!split_flags[buf_idx]) {
+                               /* it's the last packet of the set */
+                               start->hash = end->hash;
+                               start->ol_flags = end->ol_flags;
+                               /* we need to strip crc for the whole packet */
+                               start->pkt_len -= rxq->crc_len;
+                               if (end->data_len > rxq->crc_len) {
+                                       end->data_len -= rxq->crc_len;
+                               } else {
+                                       /* free up last mbuf */
+                                       struct rte_mbuf *secondlast = start;
+
+                                       while (secondlast->next != end)
+                                               secondlast = secondlast->next;
+                                       secondlast->data_len -= (rxq->crc_len -
+                                                       end->data_len);
+                                       secondlast->next = NULL;
+                                       rte_pktmbuf_free_seg(end);
+                                       end = secondlast;
+                               }
+                               pkts[pkt_idx++] = start;
+                               start = end = NULL;
+                       }
+               } else {
+                       /* not processing a split packet */
+                       if (!split_flags[buf_idx]) {
+                               /* not a split packet, save and skip */
+                               pkts[pkt_idx++] = rx_bufs[buf_idx];
+                               continue;
+                       }
+                       end = start = rx_bufs[buf_idx];
+                       rx_bufs[buf_idx]->data_len += rxq->crc_len;
+                       rx_bufs[buf_idx]->pkt_len += rxq->crc_len;
+               }
+       }
+
+       /* save the partial packet for next time */
+       rxq->pkt_first_seg = start;
+       rxq->pkt_last_seg = end;
+       memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts)));
+       return pkt_idx;
+}
+
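
A worked example of the CRC edge case in reassemble_packets above, with
assumed values (crc_len == 4, one packet delivered as two segments of
wire length 60 and 2): the raw receive path already subtracted crc_len
from every segment, reassembly adds it back per buffer, then subtracts
it once from the whole packet.

	/* assumed: crc_len = 4; wire segments of 60 and 2 bytes         */
	start->pkt_len = 60 + 2 - 4;         /* = 58                     */
	end->data_len = 2;                   /* <= crc_len: CRC straddles
	                                      * both mbufs, free the last */
	secondlast->data_len = 60 - (4 - 2); /* = 58, CRC fully stripped */

The merged packet ends up as a single 58-byte segment with
pkt_len == 58 and the empty trailing mbuf returned to its pool.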
+ /* vPMD receive routine that reassembles scattered packets
+ * Notice:
+ * - nb_pkts < RTE_I40E_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > RTE_I40E_VPMD_RX_BURST, only scan RTE_I40E_VPMD_RX_BURST
+ *   numbers of DD bits
+ */
+uint16_t
+i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+                            uint16_t nb_pkts)
+{
+       struct i40e_rx_queue *rxq = rx_queue;
+       uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
+
+       /* get some new buffers */
+       uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
+                       split_flags);
+       if (nb_bufs == 0)
+               return 0;
+
+       /* happy day case, full burst + no packets to be joined */
+       const uint64_t *split_fl64 = (uint64_t *)split_flags;
+
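+       /* the four 64-bit loads below cover the whole 32-byte
+        * split_flags array (RTE_I40E_VPMD_RX_BURST entries) at once
+        */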
+       if (rxq->pkt_first_seg == NULL &&
+                       split_fl64[0] == 0 && split_fl64[1] == 0 &&
+                       split_fl64[2] == 0 && split_fl64[3] == 0)
+               return nb_bufs;
+
+       /* reassemble any packets that need reassembly */
+       unsigned i = 0;
+
+       if (rxq->pkt_first_seg == NULL) {
+               /* find the first split flag, and reassemble only from there */
+               while (i < nb_bufs && !split_flags[i])
+                       i++;
+               if (i == nb_bufs)
+                       return nb_bufs;
+       }
+       return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
+               &split_flags[i]);
+}
+
+static inline void
+vtx1(volatile struct i40e_tx_desc *txdp,
+               struct rte_mbuf *pkt, uint64_t flags)
+{
+       uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
+                       ((uint64_t)flags  << I40E_TXD_QW1_CMD_SHIFT) |
+                       ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
+
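+       /* _mm_set_epi64x(hi, lo): the buffer DMA address fills the low
+        * qword of the descriptor, DTYPE/command/size the high qword
+        */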
+       __m128i descriptor = _mm_set_epi64x(high_qw,
+                               pkt->buf_physaddr + pkt->data_off);
+       _mm_store_si128((__m128i *)txdp, descriptor);
+}
+
+static inline void
+vtx(volatile struct i40e_tx_desc *txdp,
+               struct rte_mbuf **pkt, uint16_t nb_pkts,  uint64_t flags)
+{
+       int i;
+
+       for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
+               vtx1(txdp, *pkt, flags);
+}
+
+static inline int __attribute__((always_inline))
+i40e_tx_free_bufs(struct i40e_tx_queue *txq)
+{
+       struct i40e_tx_entry *txep;
+       uint32_t n;
+       uint32_t i;
+       int nb_free = 0;
+       struct rte_mbuf *m, *free[RTE_I40E_TX_MAX_FREE_BUF_SZ];
+
+       /* check DD bits on threshold descriptor */
+       if ((txq->tx_ring[txq->tx_next_dd].cmd_type_offset_bsz &
+                       rte_cpu_to_le_64(I40E_TXD_QW1_DTYPE_MASK)) !=
+                       rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DESC_DONE))
+               return 0;
+
+       n = txq->tx_rs_thresh;
+
+       /* first buffer to free from S/W ring is at index
+        * tx_next_dd - (tx_rs_thresh-1)
+        */
+       txep = &txq->sw_ring[txq->tx_next_dd - (n - 1)];
+       m = __rte_pktmbuf_prefree_seg(txep[0].mbuf);
+       if (likely(m != NULL)) {
+               free[0] = m;
+               nb_free = 1;
+               for (i = 1; i < n; i++) {
+                       m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+                       if (likely(m != NULL)) {
+                               if (likely(m->pool == free[0]->pool)) {
+                                       free[nb_free++] = m;
+                               } else {
+                                       rte_mempool_put_bulk(free[0]->pool,
+                                                            (void *)free,
+                                                            nb_free);
+                                       free[0] = m;
+                                       nb_free = 1;
+                               }
+                       }
+               }
+               rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+       } else {
+               for (i = 1; i < n; i++) {
+                       m = __rte_pktmbuf_prefree_seg(txep[i].mbuf);
+                       if (m != NULL)
+                               rte_mempool_put(m->pool, m);
+               }
+       }
+
+       /* buffers were freed, update counters */
+       txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh);
+       txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh);
+       if (txq->tx_next_dd >= txq->nb_tx_desc)
+               txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1);
+
+       return txq->tx_rs_thresh;
+}
+
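
A minimal model of the wrap-around at the end of i40e_tx_free_bufs,
with assumed values (nb_tx_desc == 512, tx_rs_thresh == 32):

	uint16_t tx_next_dd = 511;      /* last descriptor of the ring     */
	tx_next_dd += 32;               /* 543: past the end of the ring   */
	if (tx_next_dd >= 512)
		tx_next_dd = 32 - 1;    /* 31: last slot of the next batch */

tx_next_dd always points at the final descriptor of a tx_rs_thresh-sized
batch, which is why a single DD-bit check on that one descriptor is
enough to free the whole batch.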
+static inline void __attribute__((always_inline))
+tx_backlog_entry(struct i40e_tx_entry *txep,
+                struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+       int i;
+
+       for (i = 0; i < (int)nb_pkts; ++i)
+               txep[i].mbuf = tx_pkts[i];
+}
+
+uint16_t
+i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+                  uint16_t nb_pkts)
+{
+       struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
+       volatile struct i40e_tx_desc *txdp;
+       struct i40e_tx_entry *txep;
+       uint16_t n, nb_commit, tx_id;
+       uint64_t flags = I40E_TD_CMD;
+       uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
+       int i;
+
+       /* crossing the tx_rs_thresh boundary is not allowed */
+       nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
+
+       if (txq->nb_tx_free < txq->tx_free_thresh)
+               i40e_tx_free_bufs(txq);
+
+       nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
+       if (unlikely(nb_pkts == 0))
+               return 0;
+
+       tx_id = txq->tx_tail;
+       txdp = &txq->tx_ring[tx_id];
+       txep = &txq->sw_ring[tx_id];
+
+       txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
+
+       n = (uint16_t)(txq->nb_tx_desc - tx_id);
+       if (nb_commit >= n) {
+               tx_backlog_entry(txep, tx_pkts, n);
+
+               for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
+                       vtx1(txdp, *tx_pkts, flags);
+
+               vtx1(txdp, *tx_pkts++, rs);
+
+               nb_commit = (uint16_t)(nb_commit - n);
+
+               tx_id = 0;
+               txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
+
+               /* avoid reaching the end of the ring */
+               txdp = &txq->tx_ring[tx_id];
+               txep = &txq->sw_ring[tx_id];
+       }
+
+       tx_backlog_entry(txep, tx_pkts, nb_commit);
+
+       vtx(txdp, tx_pkts, nb_commit, flags);
+
+       tx_id = (uint16_t)(tx_id + nb_commit);
+       if (tx_id > txq->tx_next_rs) {
+               txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
+                       rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
+                                               I40E_TXD_QW1_CMD_SHIFT);
+               txq->tx_next_rs =
+                       (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
+       }
+
+       txq->tx_tail = tx_id;
+
+       I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+
+       return nb_pkts;
+}
+
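
As with the receive side, a transmit burst is clamped to tx_rs_thresh
per call, so a caller drains larger bursts in a loop; a sketch
(xmit_pkts_vec_loop is a hypothetical wrapper, not part of this patch):

	static uint16_t
	xmit_pkts_vec_loop(void *tx_queue, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
	{
		uint16_t nb_tx = 0;

		while (nb_tx < nb_pkts) {
			uint16_t ret = i40e_xmit_pkts_vec(tx_queue,
					&tx_pkts[nb_tx], nb_pkts - nb_tx);

			if (ret == 0)
				break;  /* ring full, no descriptors freed */
			nb_tx += ret;
		}

		return nb_tx;
	}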
 void __attribute__((cold))
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
 {
@@ -480,3 +716,45 @@ i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
        rxq->mbuf_initializer = *(uint64_t *)p;
        return 0;
 }
+
+int __attribute__((cold))
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+       return 0;
+}
+
+int __attribute__((cold))
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
+{
+#ifndef RTE_LIBRTE_IEEE1588
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+       struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
+
+       /* need SSE4.1 support */
+       if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1))
+               return -1;
+
+#ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
+       /* without Rx ol_flags there is no VLAN flag report */
+       if (rxmode->hw_vlan_strip != 0 ||
+           rxmode->hw_vlan_extend != 0)
+               return -1;
+#endif
+
+       /* no fdir support */
+       if (fconf->mode != RTE_FDIR_MODE_NONE)
+               return -1;
+
+       /* - no csum error report support
+        * - no header split support
+        */
+       if (rxmode->hw_ip_checksum == 1 ||
+           rxmode->header_split == 1)
+               return -1;
+
+       return 0;
+#else
+       RTE_SET_USED(dev);
+       return -1;
+#endif
+}