net/dpaa2: handle non-hardware backed buffer pool
[dpdk.git] drivers/net/dpaa2/dpaa2_rxtx.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

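/* Parse the hardware annotation (parse results) at 'hw_annot_addr' and
 * derive the corresponding RTE_PTYPE_* packet type for the received frame.
 */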
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
        uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;

        PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);

        if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER_ARP;
                goto parse_done;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                             L3_IPV4_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV4;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                        L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

        } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
                  L3_IPV6_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV6;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
            L3_IP_1_MORE_FRAGMENT |
            L3_IP_N_FIRST_FRAGMENT |
            L3_IP_N_MORE_FRAGMENT)) {
                pkt_type |= RTE_PTYPE_L4_FRAG;
                goto parse_done;
        } else {
                pkt_type |= RTE_PTYPE_L4_NONFRAG;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_UDP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_TCP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_SCTP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_ICMP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
                pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
        return pkt_type;
}

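/* Translate the frame annotation status into mbuf offload flags:
 * VLAN presence and L3/L4 checksum errors reported by the hardware.
 */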
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
        struct dpaa2_annot_hdr *annotation =
                (struct dpaa2_annot_hdr *)hw_annot_addr;

        if (BIT_ISSET_AT_POS(annotation->word3,
                             L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
                mbuf->ol_flags |= PKT_RX_VLAN_PKT;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

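/* Convert a received frame descriptor (FD) back into the rte_mbuf backing
 * its buffer, repopulating the mbuf fields and filling in the packet type
 * and offload flags from the hardware annotation.
 */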
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
                        DPAA2_GET_FD_ADDR(fd),
                     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

        /* need to repopulate some of the fields,
         * as they may have changed during the last transmission
         */
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
        mbuf->data_len = DPAA2_GET_FD_LEN(fd);
        mbuf->pkt_len = mbuf->data_len;

        /* Parse the packet; the parse results are placed right after the
         * private - sw annotation area
         */
        mbuf->packet_type = dpaa2_dev_rx_parse(
                        (uint64_t)(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);

        dpaa2_dev_rx_offload((uint64_t)(DPAA2_GET_FD_ADDR(fd)) +
                             DPAA2_FD_PTA_SIZE, mbuf);

        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);

        PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

        return mbuf;
}

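/* Build a frame descriptor from an mbuf allocated from a DPAA2
 * (hardware-backed) buffer pool; the buffer is handed to hardware as-is
 * with the given bpid.
 */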
static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid)
{
        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, (mbuf->buf_addr));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
}

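/* Handle an mbuf that is not backed by a DPAA2 buffer pool: allocate a
 * buffer from the hardware-backed pool 'bpid', copy the packet data and
 * the relevant mbuf fields into it, build the FD from the copy and free
 * the original mbuf.
 */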
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
                    struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *m;
        void *mb = NULL;

        if (rte_dpaa2_mbuf_alloc_bulk(
                rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
                PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
                rte_pktmbuf_free(mbuf);
                return -1;
        }
        m = (struct rte_mbuf *)mb;
        memcpy((char *)m->buf_addr + mbuf->data_off,
               (void *)((char *)mbuf->buf_addr + mbuf->data_off),
                mbuf->pkt_len);

        /* Copy required fields */
        m->data_off = mbuf->data_off;
        m->ol_flags = mbuf->ol_flags;
        m->packet_type = mbuf->packet_type;
        m->tx_offload = mbuf->tx_offload;

        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, (m->buf_addr));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
                   (void *)mbuf, mbuf->buf_addr);

        PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
                   DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_BPID(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));
        /* free the original packet */
        rte_pktmbuf_free(mbuf);

        return 0;
}

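/* Rx burst function: issue a volatile dequeue (pull) command on the
 * queue's FQID and convert each dequeued frame descriptor into an rte_mbuf.
 */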
uint16_t
dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function is responsible for receiving frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0;
        uint8_t is_last = 0, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct qbman_pull_desc pulldesc;
        struct rte_eth_dev *dev = dpaa2_q->dev;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
        dq_storage = dpaa2_q->q_storage->dq_storage[0];

        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc,
                                      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
                                       DPAA2_DQRR_RING_SIZE : nb_pkts);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        /* todo optimization - we can have dq_storage_phys available */
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(dq_storage), 1);

        /* Issue a volatile dequeue command. */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        PMD_RX_LOG(ERR, "VDQ command is not issued."
                                   " QBMAN is busy\n");
                        /* Portal was busy, try again */
                        continue;
                }
                break;
        }

        /* Receive the packets till the Last Dequeue entry is found with
         * respect to the above issued PULL command.
         */
        while (!is_last) {
                struct rte_mbuf *mbuf;
                /* Check if the previously issued command is completed.
                 * Also seems like the SWP is shared between the
                 * Ethernet Driver and the SEC driver.
                 */
                while (!qbman_check_command_complete(swp, dq_storage))
                        ;
                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_result_has_new_result(swp, dq_storage))
                        ;
                /* Check whether the Last Pull command has expired and
                 * set the condition for loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        is_last = 1;
                        /* Check for valid frame. */
                        status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }

                fd = qbman_result_DQ_fd(dq_storage);
                mbuf = (struct rte_mbuf *)(DPAA2_GET_FD_ADDR(fd)
                   - rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
                /* Prefetch mbuf */
                rte_prefetch0(mbuf);
                /* Prefetch annotation address for the parse results */
                rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd)
                                                + DPAA2_FD_PTA_SIZE + 16));

                bufs[num_rx] = eth_fd_to_mbuf(fd);
                bufs[num_rx]->port = dev->data->port_id;

                num_rx++;
                dq_storage++;
        } /* End of Packet Rx loop */

        dpaa2_q->rx_pkts += num_rx;

        /* Return the total number of packets received to DPAA2 app */
        return num_rx;
}

/*
 * Callback to handle sending packets through WRIOP based interface
 */
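/* Frames coming from mempools that are not managed by the DPAA2 hardware
 * buffer pools cannot be handed to WRIOP directly; such buffers are copied
 * into the interface's default hardware-backed pool via eth_copy_mbuf_to_fd().
 */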
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function to transmit the frames to the given device and VQ */
        uint32_t loop;
        int32_t ret;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t frames_to_send;
        struct rte_mempool *mp;
        struct qbman_eq_desc eqdesc;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_swp *swp;
        uint16_t num_tx = 0;
        uint16_t bpid;
        struct rte_eth_dev *dev = dpaa2_q->dev;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

        /* Prepare the enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);
        qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
                             dpaa2_q->flow_id, dpaa2_q->tc_index);

        /* Clear the unused FD fields before sending */
        while (nb_pkts) {
                frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

                for (loop = 0; loop < frames_to_send; loop++) {
                        fd_arr[loop].simple.frc = 0;
                        DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
                        DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
                        mp = (*bufs)->pool;
                        /* Not a frame allocated from a hw_pkt pool */
                        if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                                PMD_TX_LOG(ERR, "non hw offload buffer");
                                /* alloc should be from the default buffer pool
                                 * attached to this interface
                                 */
                                if (priv->bp_list) {
                                        bpid = priv->bp_list->buf_pool.bpid;
                                } else {
                                        PMD_TX_LOG(ERR, "err: no buffer pool"
                                                   " attached");
                                        num_tx = 0;
                                        goto skip_tx;
                                }
                                if (eth_copy_mbuf_to_fd(*bufs,
                                                        &fd_arr[loop], bpid)) {
                                        bufs++;
                                        continue;
                                }
                        } else {
                                bpid = mempool_to_bpid(mp);
                                eth_mbuf_to_fd(*bufs, &fd_arr[loop], bpid);
                        }
                        bufs++;
                }
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qbman_swp_send_multiple(swp, &eqdesc,
                                        &fd_arr[loop], frames_to_send - loop);
                }

                num_tx += frames_to_send;
                dpaa2_q->tx_pkts += frames_to_send;
                nb_pkts -= frames_to_send;
        }
skip_tx:
        return num_tx;
}