/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <rte_vect.h>

#include "otx2_ethdev.h"
#include "otx2_rx.h"

#define NIX_DESCS_PER_LOOP      4
#define CQE_CAST(x)             ((struct nix_cqe_hdr_s *)(x))
#define CQE_SZ(x)               ((x) * NIX_CQ_ENTRY_SZ)
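
/*
 * CQE_CAST() reinterprets a completion ring address as a CQE header;
 * CQE_SZ() converts a CQE count into a byte offset, at NIX_CQ_ENTRY_SZ
 * bytes per entry, so desc + CQE_SZ(head) addresses the CQE at ring
 * index head.
 */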

static inline uint16_t
nix_rx_nb_pkts(struct otx2_eth_rxq *rxq, const uint64_t wdata,
               const uint16_t pkts, const uint32_t qmask)
{
        uint32_t available = rxq->available;

        /* Update the available count if the cached value is not enough */
        if (unlikely(available < pkts)) {
                uint64_t reg, head, tail;

                /* Use the LDADDA version to avoid reordering */
                reg = otx2_atomic64_add_sync(wdata, rxq->cq_status);
                /* CQ_OP_STATUS operation error */
                if (reg & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
                    reg & BIT_ULL(CQ_OP_STAT_CQ_ERR))
                        return 0;

                tail = reg & 0xFFFFF;
                head = (reg >> 20) & 0xFFFFF;
                if (tail < head)
                        available = tail - head + qmask + 1;
                else
                        available = tail - head;

                rxq->available = available;
        }

        return RTE_MIN(pkts, available);
}
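
/*
 * Worked example of the head/tail decode above (illustrative only): the
 * CQ_OP_STATUS value packs tail in bits [19:0] and head in bits [39:20].
 * With a ring of qmask + 1 entries, a wrapped queue such as
 * head = 0xFFFFE, tail = 0x1, qmask = 0xFFFFF yields
 * available = tail - head + qmask + 1 = 3 pending CQEs.
 */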

static __rte_always_inline uint16_t
nix_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
              uint16_t pkts, const uint16_t flags)
{
        struct otx2_eth_rxq *rxq = rx_queue;
        const uint64_t mbuf_init = rxq->mbuf_initializer;
        const void *lookup_mem = rxq->lookup_mem;
        const uint64_t data_off = rxq->data_off;
        const uintptr_t desc = rxq->desc;
        const uint64_t wdata = rxq->wdata;
        const uint32_t qmask = rxq->qmask;
        uint16_t packets = 0, nb_pkts;
        uint32_t head = rxq->head;
        struct nix_cqe_hdr_s *cq;
        struct rte_mbuf *mbuf;

        nb_pkts = nix_rx_nb_pkts(rxq, wdata, pkts, qmask);

        while (packets < nb_pkts) {
                /* Prefetch two descriptors ahead */
                rte_prefetch_non_temporal((void *)(desc + (CQE_SZ(head + 2))));
                cq = (struct nix_cqe_hdr_s *)(desc + CQE_SZ(head));

                mbuf = nix_get_mbuf_from_cqe(cq, data_off);

                otx2_nix_cqe_to_mbuf(cq, cq->tag, mbuf, lookup_mem, mbuf_init,
                                     flags);
                otx2_nix_mbuf_to_tstamp(mbuf, rxq->tstamp, flags);
                rx_pkts[packets++] = mbuf;
                otx2_prefetch_store_keep(mbuf);
                head++;
                head &= qmask;
        }

        rxq->head = head;
        rxq->available -= nb_pkts;

        /* Free all the CQEs that we've processed */
        otx2_write64((wdata | nb_pkts), rxq->cq_door);

        return nb_pkts;
}
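
/*
 * Illustrative usage (hypothetical application code, not part of the
 * driver): once installed as eth_dev->rx_pkt_burst, one of the
 * otx2_nix_recv_pkts_* variants is reached through the generic burst API.
 * port_id and queue_id are assumed application values:
 *
 *      struct rte_mbuf *pkts[32];
 *      uint16_t i, n;
 *
 *      n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *      for (i = 0; i < n; i++)
 *              rte_pktmbuf_free(pkts[i]);
 */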

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
static uint16_t __rte_noinline __hot                                           \
otx2_nix_recv_pkts_ ## name(void *rx_queue,                                    \
                        struct rte_mbuf **rx_pkts, uint16_t pkts)              \
{                                                                              \
        return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags));                \
}                                                                              \
                                                                               \
static uint16_t __rte_noinline __hot                                           \
otx2_nix_recv_pkts_mseg_ ## name(void *rx_queue,                               \
                        struct rte_mbuf **rx_pkts, uint16_t pkts)              \
{                                                                              \
        return nix_recv_pkts(rx_queue, rx_pkts, pkts,                          \
                             (flags) | NIX_RX_MULTI_SEG_F);                    \
}                                                                              \

NIX_RX_FASTPATH_MODES
#undef R
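
/*
 * Sketch of one expansion (assuming NIX_RX_FASTPATH_MODES in otx2_rx.h
 * contains an entry along the lines of
 * R(no_offload, 0, 0, 0, 0, 0, 0, NIX_RX_OFFLOAD_NONE)); the R() template
 * above then emits:
 *
 *      static uint16_t __rte_noinline __hot
 *      otx2_nix_recv_pkts_no_offload(void *rx_queue,
 *                              struct rte_mbuf **rx_pkts, uint16_t pkts)
 *      {
 *              return nix_recv_pkts(rx_queue, rx_pkts, pkts,
 *                                   (NIX_RX_OFFLOAD_NONE));
 *      }
 *
 * plus the matching otx2_nix_recv_pkts_mseg_no_offload() variant with
 * NIX_RX_MULTI_SEG_F OR'ed into the flags.
 */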

static inline void
pick_rx_func(struct rte_eth_dev *eth_dev,
             const eth_rx_burst_t rx_burst[2][2][2][2][2][2])
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        /* [TSTMP] [MARK] [VLAN] [CKSUM] [PTYPE] [RSS] */
        eth_dev->rx_pkt_burst = rx_burst
                [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_TSTAMP_F)]
                [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_MARK_UPDATE_F)]
                [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_VLAN_STRIP_F)]
                [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_CHECKSUM_F)]
                [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_PTYPE_F)]
                [!!(dev->rx_offload_flags & NIX_RX_OFFLOAD_RSS_F)];
}
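
/*
 * Example: with rx_offload_flags = NIX_RX_OFFLOAD_CHECKSUM_F |
 * NIX_RX_OFFLOAD_RSS_F, the lookup above resolves to
 * rx_burst[0][0][0][1][0][1], i.e. the variant built with only the
 * checksum and RSS flags enabled.
 */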

void
otx2_eth_set_rx_function(struct rte_eth_dev *eth_dev)
{
        struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);

        const eth_rx_burst_t nix_eth_rx_burst[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
        [f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_ ## name,

NIX_RX_FASTPATH_MODES
#undef R
        };

        const eth_rx_burst_t nix_eth_rx_burst_mseg[2][2][2][2][2][2] = {
#define R(name, f5, f4, f3, f2, f1, f0, flags)                          \
        [f5][f4][f3][f2][f1][f0] = otx2_nix_recv_pkts_mseg_ ## name,

NIX_RX_FASTPATH_MODES
#undef R
        };

        pick_rx_func(eth_dev, nix_eth_rx_burst);

        if (dev->rx_offloads & DEV_RX_OFFLOAD_SCATTER)
                pick_rx_func(eth_dev, nix_eth_rx_burst_mseg);

        /* Copy the multi-segment, no-offload version for the teardown sequence */
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                dev->rx_pkt_burst_no_offload =
                        nix_eth_rx_burst_mseg[0][0][0][0][0][0];

        /* Make the function pointer updates visible before use */
        rte_mb();
}
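
/*
 * Illustrative configuration (hypothetical application code): enabling
 * scatter at configure time is what steers the selection above to the
 * multi-segment table. port_id, nb_rxq and nb_txq are assumed application
 * values:
 *
 *      struct rte_eth_conf conf = {
 *              .rxmode = { .offloads = DEV_RX_OFFLOAD_SCATTER },
 *      };
 *
 *      rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */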