/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright (c) 2016 NXP. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
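/*
 * Data path of the DPAA2 PMD: the Rx routines below translate QBMAN frame
 * descriptors (FDs) pulled from a frame queue into rte_mbufs, and the Tx
 * routines translate rte_mbufs into FDs enqueued through the QBMAN
 * software portal.
 */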
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
	uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
	struct dpaa2_annot_hdr *annotation =
			(struct dpaa2_annot_hdr *)hw_annot_addr;

	PMD_RX_LOG(DEBUG, "annotation = 0x%lx", annotation->word4);

	if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		goto parse_done;
	} else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
		pkt_type = RTE_PTYPE_L2_ETHER;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
			     L3_IPV4_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV4;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
				     L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
	} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
		   L3_IPV6_N_PRESENT)) {
		pkt_type |= RTE_PTYPE_L3_IPV6;
		if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
				     L3_IP_N_OPT_PRESENT))
			pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
	} else {
		goto parse_done;
	}

	if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
			     L3_IP_1_MORE_FRAGMENT |
			     L3_IP_N_FIRST_FRAGMENT |
			     L3_IP_N_MORE_FRAGMENT)) {
		pkt_type |= RTE_PTYPE_L4_FRAG;
		goto parse_done;
	}

	pkt_type |= RTE_PTYPE_L4_NONFRAG;

	if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_UDP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_TCP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_SCTP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
		pkt_type |= RTE_PTYPE_L4_ICMP;
	else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
		pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
	return pkt_type;
}
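/*
 * Translate the frame annotation status bits into mbuf offload flags:
 * VLAN presence and L3/L4 checksum errors reported by the hardware parser.
 */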
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
	struct dpaa2_annot_hdr *annotation =
		(struct dpaa2_annot_hdr *)hw_annot_addr;

	if (BIT_ISSET_AT_POS(annotation->word3,
			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
		mbuf->ol_flags |= PKT_RX_VLAN_PKT;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
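/*
 * Build a multi-segment mbuf chain from a scatter/gather FD. Each SG entry
 * points to a buffer that carries its mbuf metadata inline, so the mbuf is
 * recovered from the buffer address itself (no separate allocation).
 */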
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct qbman_sge *sgt, *sge;
	uint64_t sg_addr, fd_addr;
	int i = 0;
	struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

	fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

	/* Get Scatter gather table address */
	sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

	sge = &sgt[i++];
	sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

	/* First Scatter gather entry */
	first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	/* Prepare all the metadata for first segment */
	first_seg->buf_addr = (uint8_t *)sg_addr;
	first_seg->ol_flags = 0;
	first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
	first_seg->data_len = sge->length & 0x1FFFF;
	first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
	first_seg->nb_segs = 1;
	first_seg->next = NULL;

	/* Parse results sit after the private - sw annotation area */
	first_seg->packet_type = dpaa2_dev_rx_parse(
		(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
		+ DPAA2_FD_PTA_SIZE);
	dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
		DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE, first_seg);
	rte_mbuf_refcnt_set(first_seg, 1);

	cur_seg = first_seg;
	while (!DPAA2_SG_IS_FINAL(sge)) {
		sge = &sgt[i++];
		sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
			DPAA2_GET_FLE_ADDR(sge));
		next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
			rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
		next_seg->buf_addr = (uint8_t *)sg_addr;
		next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
		next_seg->data_len = sge->length & 0x1FFFF;
		first_seg->nb_segs += 1;
		rte_mbuf_refcnt_set(next_seg, 1);
		cur_seg->next = next_seg;
		next_seg->next = NULL;
		cur_seg = next_seg;
	}

	/* Free the FD-level buffer, which carried only the SG table */
	temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
	rte_mbuf_refcnt_set(temp, 1);
	rte_pktmbuf_free_seg(temp);

	return first_seg;
}
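/*
 * Build a single-segment mbuf from a contiguous FD. The mbuf structure
 * lives in the buffer's metadata area, in front of the packet data.
 */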
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
	struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
		DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

	/* need to repopulate some of the fields,
	 * as they may have changed in last transmission
	 */
	mbuf->nb_segs = 1;
	mbuf->ol_flags = 0;
	mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
	mbuf->data_len = DPAA2_GET_FD_LEN(fd);
	mbuf->pkt_len = mbuf->data_len;

	/* Parse the packet */
	/* parse results are after the private - sw annotation area */
	mbuf->packet_type = dpaa2_dev_rx_parse(
		(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
		+ DPAA2_FD_PTA_SIZE);

	dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
		DPAA2_GET_FD_ADDR(fd)) + DPAA2_FD_PTA_SIZE, mbuf);

	mbuf->next = NULL;
	rte_mbuf_refcnt_set(mbuf, 1);

	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	return mbuf;
}
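/*
 * Convert a multi-segment mbuf into a scatter/gather FD. A buffer is taken
 * from the mbuf's own pool to hold the SG table; each segment becomes one
 * SG entry, with the bpid (or the invalid-bpid bit) controlling whether
 * the hardware frees the buffer after transmission.
 */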
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
		  struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
	struct qbman_sge *sgt, *sge = NULL;
	int i;

	/* First Prepare FD to be transmitted */
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	temp = rte_pktmbuf_alloc(mbuf->pool);
	if (temp == NULL) {
		PMD_TX_LOG(ERR, "No memory to allocate S/G table");
		return -ENOMEM;
	}

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
	DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
	DPAA2_SET_FD_OFFSET(fd, temp->data_off);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
	DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
	/* Set Scatter gather table and Scatter gather entries */
	sgt = (struct qbman_sge *)(
		(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
		+ DPAA2_GET_FD_OFFSET(fd));

	for (i = 0; i < mbuf->nb_segs; i++) {
		sge = &sgt[i];
		/* Resetting the buffer pool id and offset field */
		sge->fin_bpid_offset = 0;
		DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
		DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
		sge->length = cur_seg->data_len;
		if (RTE_MBUF_DIRECT(cur_seg)) {
			if (rte_mbuf_refcnt_read(cur_seg) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
				rte_mbuf_refcnt_update(cur_seg, -1);
			} else {
				DPAA2_SET_FLE_BPID(sge,
					mempool_to_bpid(cur_seg->pool));
			}
			cur_seg = cur_seg->next;
		} else {
			/* Get owner MBUF from indirect buffer */
			mi = rte_mbuf_from_indirect(cur_seg);
			if (rte_mbuf_refcnt_read(mi) > 1) {
				/* If refcnt > 1, invalid bpid is set to ensure
				 * owner buffer is not freed by HW
				 */
				DPAA2_SET_FLE_IVP(sge);
			} else {
				DPAA2_SET_FLE_BPID(sge,
					mempool_to_bpid(mi->pool));
				rte_mbuf_refcnt_update(mi, 1);
			}
			prev_seg = cur_seg;
			cur_seg = cur_seg->next;
			prev_seg->next = NULL;
			rte_pktmbuf_free(prev_seg);
		}
	}
	DPAA2_SG_SET_FINAL(sge, true);
	return 0;
}
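/*
 * Convert a single-segment mbuf into a contiguous FD. Buffers with
 * refcnt > 1, and indirect mbufs whose owner is still referenced, get the
 * invalid-bpid bit so the hardware does not release them back to the pool
 * on transmit completion.
 */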
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));
static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
	       struct qbman_fd *fd, uint16_t bpid)
{
	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

	PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
		"fd_off=%d fd =%lx, meta = %d bpid =%d, len=%d\n",
		mbuf, mbuf->buf_addr, mbuf->data_off,
		DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
		rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

	if (RTE_MBUF_DIRECT(mbuf)) {
		if (rte_mbuf_refcnt_read(mbuf) > 1) {
			DPAA2_SET_FD_IVP(fd);
			rte_mbuf_refcnt_update(mbuf, -1);
		}
	} else {
		struct rte_mbuf *mi;

		mi = rte_mbuf_from_indirect(mbuf);
		if (rte_mbuf_refcnt_read(mi) > 1)
			DPAA2_SET_FD_IVP(fd);
		else
			rte_mbuf_refcnt_update(mi, 1);
		rte_pktmbuf_free(mbuf);
	}
}
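/*
 * Fallback for mbufs that do not come from a DPAA2 (hw-managed) mempool:
 * copy the packet into a buffer allocated from the hardware pool, build
 * the FD around the copy, then free the original mbuf.
 */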
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
		    struct qbman_fd *fd, uint16_t bpid)
{
	struct rte_mbuf *m;
	void *mb = NULL;

	if (rte_dpaa2_mbuf_alloc_bulk(
		rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
		PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
		rte_pktmbuf_free(mbuf);
		return -1;
	}
	m = (struct rte_mbuf *)mb;
	memcpy((char *)m->buf_addr + mbuf->data_off,
	       (void *)((char *)mbuf->buf_addr + mbuf->data_off),
	       mbuf->pkt_len);

	/* Copy required fields */
	m->data_off = mbuf->data_off;
	m->ol_flags = mbuf->ol_flags;
	m->packet_type = mbuf->packet_type;
	m->tx_offload = mbuf->tx_offload;

	/* Resetting the buffer pool id and offset field */
	fd->simple.bpid_offset = 0;

	DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
	DPAA2_SET_FD_LEN(fd, mbuf->data_len);
	DPAA2_SET_FD_BPID(fd, bpid);
	DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
	DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
		   (void *)mbuf, mbuf->buf_addr);

	PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
		   DPAA2_GET_FD_ADDR(fd),
		   DPAA2_GET_FD_BPID(fd),
		   rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
		   DPAA2_GET_FD_OFFSET(fd),
		   DPAA2_GET_FD_LEN(fd));
	/* free the original packet */
	rte_pktmbuf_free(mbuf);

	return 0;
}
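/*
 * Rx burst callback with prefetching: while the frames from the previously
 * issued volatile dequeue (pull) command are consumed, a new pull command
 * is issued for the next burst. Two dq_storage areas are used in ping-pong
 * (toggle) fashion so dequeue latency overlaps with mbuf conversion.
 */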
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function receive frames for a given device and VQ */
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_result *dq_storage;
	uint32_t fqid = dpaa2_q->fqid;
	int ret, num_rx = 0;
	uint8_t is_last = 0, status;
	struct qbman_swp *swp;
	const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
	struct qbman_pull_desc pulldesc;
	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
	struct rte_eth_dev *dev = dpaa2_q->dev;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;
	if (!q_storage->active_dqs) {
		q_storage->toggle = 0;
		dq_storage = q_storage->dq_storage[q_storage->toggle];
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc,
					      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
					      DPAA2_DQRR_RING_SIZE : nb_pkts);
		qbman_pull_desc_set_fq(&pulldesc, fqid);
		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
			while (!qbman_check_command_complete(swp,
			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
				;
			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
		}
		while (1) {
			if (qbman_swp_pull(swp, &pulldesc)) {
				PMD_RX_LOG(WARNING, "VDQ command is not issued."
					   " QBMAN is busy\n");
				/* Portal was busy, try again */
				continue;
			}
			break;
		}
		q_storage->active_dqs = dq_storage;
		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
	}
	dq_storage = q_storage->active_dqs;
	/* Check if the previous issued command is completed.
	 * Also seems like the SWP is shared between the Ethernet Driver
	 * and the SEC driver.
	 */
	while (!qbman_check_command_complete(swp, dq_storage))
		;
	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
		clear_swp_active_dqs(q_storage->active_dpio_id);
	while (!is_last) {
		/* Loop until the dq_storage is updated with
		 * new token by QBMAN
		 */
		while (!qbman_result_has_new_result(swp, dq_storage))
			;
		rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
		/* Check whether Last Pull command is Expired and
		 * setting Condition for Loop termination
		 */
		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
			is_last = 1;
			/* Check for valid frame. */
			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
				continue;
		}
		fd[num_rx] = qbman_result_DQ_fd(dq_storage);

		/* Prefetch Annotation address for the parse results */
		rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
			      + DPAA2_FD_PTA_SIZE + 16));

		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
		else
			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
		bufs[num_rx]->port = dev->data->port_id;

		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
			rte_vlan_strip(bufs[num_rx]);

		dq_storage++;
		num_rx++;
	}

	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
		while (!qbman_check_command_complete(swp,
		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
			;
		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
	}
	/* Prepare the next pull so frames are already queued next time */
	q_storage->toggle ^= 1;
	dq_storage = q_storage->dq_storage[q_storage->toggle];
	qbman_pull_desc_clear(&pulldesc);
	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
	qbman_pull_desc_set_fq(&pulldesc, fqid);
	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
		(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
	/* Issue a volatile dequeue command. */
	while (1) {
		if (qbman_swp_pull(swp, &pulldesc)) {
			PMD_RX_LOG(WARNING, "VDQ command is not issued."
				   " QBMAN is busy\n");
			continue;
		}
		break;
	}
	q_storage->active_dqs = dq_storage;
	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

	dpaa2_q->rx_pkts += num_rx;

	/* Return the total number of packets received to DPAA2 app */
	return num_rx;
}
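/*
 * The Tx path below prepares an array of FDs per burst and enqueues them
 * in batches through qbman_swp_send_multiple(), skipping transmission
 * entirely while the queue reports a congested state.
 */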
/*
 * Callback to handle sending packets through WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	/* Function to transmit the frames to given device and VQ */
	uint32_t loop;
	int32_t ret;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	struct rte_mbuf *mi;
	uint32_t frames_to_send;
	struct rte_mempool *mp;
	struct qbman_eq_desc eqdesc;
	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
	struct qbman_swp *swp;
	uint16_t num_tx = 0;
	uint16_t bpid;
	struct rte_eth_dev *dev = dpaa2_q->dev;
	struct dpaa2_dev_priv *priv = dev->data->dev_private;
	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		ret = dpaa2_affine_qbman_swp();
		if (ret) {
			RTE_LOG(ERR, PMD, "Failure in affining portal\n");
			return 0;
		}
	}
	swp = DPAA2_PER_LCORE_PORTAL;

	PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

	/* Prepare enqueue descriptor */
	qbman_eq_desc_clear(&eqdesc);
	qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
	qbman_eq_desc_set_response(&eqdesc, 0, 0);
	qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
			     dpaa2_q->flow_id, dpaa2_q->tc_index);
	/* Clear the unused FD fields before sending */
	while (nb_pkts) {
		/* Check if the queue is congested */
		if (qbman_result_SCN_state_in_mem(dpaa2_q->cscn))
			goto skip_tx;
		/* Send a full ring of frames when at least that many wait */
		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
		for (loop = 0; loop < frames_to_send; loop++) {
			fd_arr[loop].simple.frc = 0;
			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
			if (RTE_MBUF_DIRECT(*bufs)) {
				mp = (*bufs)->pool;
			} else {
				mi = rte_mbuf_from_indirect(*bufs);
				mp = mi->pool;
			}
			/* Not a hw_pkt pool allocated frame */
			if (unlikely(!mp)) {
				PMD_TX_LOG(ERR, "err: no bpool attached");
				goto skip_tx;
			}
			if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
				PMD_TX_LOG(ERR, "non hw offload buffer");
				/* alloc should be from the default buffer pool
				 * attached to this interface
				 */
				if (priv->bp_list) {
					bpid = priv->bp_list->buf_pool.bpid;
				} else {
					PMD_TX_LOG(ERR,
						   "err: no bpool attached");
					goto skip_tx;
				}
				if (unlikely((*bufs)->nb_segs > 1)) {
					PMD_TX_LOG(ERR, "S/G support not added"
						" for non hw offload buffer");
					goto skip_tx;
				}
				if (eth_copy_mbuf_to_fd(*bufs,
							&fd_arr[loop], bpid)) {
					bufs++;
					continue;
				}
			} else {
				bpid = mempool_to_bpid(mp);
				if (unlikely((*bufs)->nb_segs > 1)) {
					if (eth_mbuf_to_sg_fd(*bufs,
							&fd_arr[loop], bpid))
						goto skip_tx;
				} else {
					eth_mbuf_to_fd(*bufs,
						       &fd_arr[loop], bpid);
				}
			}
			bufs++;
		}
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_send_multiple(swp, &eqdesc,
					&fd_arr[loop], frames_to_send - loop);
		}

		num_tx += frames_to_send;
		dpaa2_q->tx_pkts += frames_to_send;
		nb_pkts -= frames_to_send;
	}
skip_tx:
	return num_tx;
}
/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
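/* The original body is truncated here; what follows is a minimal stub
 * sketch, assuming the conventional no-op implementation: ignore the
 * arguments and report zero packets transmitted.
 */
{
	(void)queue;   /* unused: dummy callback touches no queue state */
	(void)bufs;    /* unused: packets are neither sent nor freed */
	(void)nb_pkts; /* unused */

	return 0;
}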