/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2016-2020 NXP
+ * Copyright 2016-2021 NXP
*
*/
#include <net/if.h>
#include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>
+#include <rte_hexdump.h>
#include <rte_fslmc.h>
#include <fslmc_vfio.h>
static void enable_tx_tstamp(struct qbman_fd *fd) __rte_unused;
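+/* Return a pointer to the mbuf's timestamp, which now lives in a dynamic
+ * field (dpaa2_timestamp_dynfield_offset is registered elsewhere in the
+ * PMD); this replaces the former static mbuf->timestamp field.
+ */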
+static inline rte_mbuf_timestamp_t *
+dpaa2_timestamp_dynfield(struct rte_mbuf *mbuf)
+{
+ return RTE_MBUF_DYNFIELD(mbuf,
+ dpaa2_timestamp_dynfield_offset, rte_mbuf_timestamp_t *);
+}
+
#define DPAA2_MBUF_TO_CONTIG_FD(_mbuf, _fd, _bpid) do { \
DPAA2_SET_FD_ADDR(_fd, DPAA2_MBUF_VADDR_TO_IOVA(_mbuf)); \
DPAA2_SET_FD_LEN(_fd, _mbuf->data_len); \
m->hash.rss = fd->simple.flc_hi;
m->ol_flags |= PKT_RX_RSS_HASH;
- if (dpaa2_enable_ts) {
- m->timestamp = annotation->word2;
- m->ol_flags |= PKT_RX_TIMESTAMP;
- DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "", m->timestamp);
+ if (dpaa2_enable_ts[m->port]) {
+ *dpaa2_timestamp_dynfield(m) = annotation->word2;
+ m->ol_flags |= dpaa2_timestamp_rx_dynflag;
+ DPAA2_PMD_DP_DEBUG("pkt timestamp:0x%" PRIx64 "",
+ *dpaa2_timestamp_dynfield(m));
}
DPAA2_PMD_DP_DEBUG("HW frc = 0x%x\t packet type =0x%x "
goto parse_done;
}
+ if (BIT_ISSET_AT_POS(annotation->word3, L2_MPLS_1_PRESENT |
+ L2_MPLS_N_PRESENT))
+ pkt_type |= RTE_PTYPE_L2_ETHER_MPLS;
+
if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
L3_IPV4_N_PRESENT)) {
pkt_type |= RTE_PTYPE_L3_IPV4;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- mbuf->ol_flags |= PKT_RX_TIMESTAMP;
- mbuf->timestamp = annotation->word2;
- DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "", mbuf->timestamp);
+ if (dpaa2_enable_ts[mbuf->port]) {
+ *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
+ mbuf->ol_flags |= dpaa2_timestamp_rx_dynflag;
+ DPAA2_PMD_DP_DEBUG("pkt timestamp: 0x%" PRIx64 "",
+ *dpaa2_timestamp_dynfield(mbuf));
+ }
/* Check detailed parsing requirement */
if (annotation->word3 & 0x7FFFFC3FFFF)
static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
- struct qbman_fd *fd, uint16_t bpid)
+ struct qbman_fd *fd,
+ struct rte_mempool *mp, uint16_t bpid)
{
struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
struct qbman_sge *sgt, *sge = NULL;
- int i;
+ int i, offset = 0;
- temp = rte_pktmbuf_alloc(mbuf->pool);
- if (temp == NULL) {
- DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
- return -ENOMEM;
+#ifdef RTE_LIBRTE_IEEE1588
+ /* annotation area for timestamp in first buffer */
+ offset = 0x64;
+#endif
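+ /* If the direct mbuf has enough headroom ahead of its data to hold
+ * the S/G table (plus the IEEE1588 annotation area when enabled),
+ * build the SGT in place in the first buffer instead of allocating
+ * a new one.
+ */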
+ if (RTE_MBUF_DIRECT(mbuf) &&
+ (mbuf->data_off > (mbuf->nb_segs * sizeof(struct qbman_sge)
+ + offset))) {
+ temp = mbuf;
+ if (rte_mbuf_refcnt_read(temp) > 1) {
+ /* If refcnt > 1, an invalid bpid is set to ensure
+ * the buffer is not freed by HW
+ */
+ fd->simple.bpid_offset = 0;
+ DPAA2_SET_FD_IVP(fd);
+ rte_mbuf_refcnt_update(temp, -1);
+ } else {
+ DPAA2_SET_ONLY_FD_BPID(fd, bpid);
+ }
+ DPAA2_SET_FD_OFFSET(fd, offset);
+ } else {
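+ /* Otherwise allocate a separate buffer from the given mempool to
+ * carry the S/G table.
+ */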
+ temp = rte_pktmbuf_alloc(mp);
+ if (temp == NULL) {
+ DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
+ return -ENOMEM;
+ }
+ DPAA2_SET_ONLY_FD_BPID(fd, bpid);
+ DPAA2_SET_FD_OFFSET(fd, temp->data_off);
}
-
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
- DPAA2_SET_ONLY_FD_BPID(fd, bpid);
- DPAA2_SET_FD_OFFSET(fd, temp->data_off);
DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
DPAA2_RESET_FD_FRC(fd);
DPAA2_RESET_FD_CTRL(fd);
+ DPAA2_RESET_FD_FLC(fd);
/* Set scatter gather table and scatter gather entries */
sgt = (struct qbman_sge *)(
(size_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
sge->length = cur_seg->data_len;
if (RTE_MBUF_DIRECT(cur_seg)) {
- if (rte_mbuf_refcnt_read(cur_seg) > 1) {
+ /* if we are using an inline SGT in the same buffer,
+ * set the FLE format to Frame Data Section
+ */
+ if (temp == cur_seg) {
+ DPAA2_SG_SET_FORMAT(sge, qbman_fd_list);
+ DPAA2_SET_FLE_IVP(sge);
+ } else {
+ if (rte_mbuf_refcnt_read(cur_seg) > 1) {
/* If refcnt > 1, an invalid bpid is set to ensure
 * the buffer is not freed by HW
 */
- DPAA2_SET_FLE_IVP(sge);
- rte_mbuf_refcnt_update(cur_seg, -1);
- } else
- DPAA2_SET_FLE_BPID(sge,
+ DPAA2_SET_FLE_IVP(sge);
+ rte_mbuf_refcnt_update(cur_seg, -1);
+ } else {
+ DPAA2_SET_FLE_BPID(sge,
mempool_to_bpid(cur_seg->pool));
+ }
+ }
+ cur_seg = cur_seg->next;
+ } else if (RTE_MBUF_HAS_EXTBUF(cur_seg)) {
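+ /* External buffer: mark the SGE invalid so hardware never frees
+ * it; the driver releases such mbufs after transmission.
+ */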
+ DPAA2_SET_FLE_IVP(sge);
cur_seg = cur_seg->next;
} else {
/* Get owner MBUF from indirect buffer */
DPAA2_SET_FD_IVP(fd);
rte_mbuf_refcnt_update(mbuf, -1);
}
+ } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
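+ /* Single-segment external buffer: likewise mark the FD invalid so
+ * hardware does not attempt to free it.
+ */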
+ DPAA2_SET_FD_IVP(fd);
} else {
struct rte_mbuf *mi;
return 0;
}
+static void
+dump_err_pkts(struct dpaa2_queue *dpaa2_q)
+{
+ /* Pull and dump frames received on the Rx error queue of a given device and VQ */
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_rx = 0, num_pulled;
+ uint8_t pending, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd;
+ struct qbman_pull_desc pulldesc;
+ struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+ uint32_t lcore_id = rte_lcore_id();
+ void *v_addr, *hw_annot_addr;
+ struct dpaa2_fas *fas;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
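+ /* Issue a volatile dequeue (pull) on the error FQ and walk each
+ * returned frame, dumping its FD fields and contents.
+ */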
+ dq_storage = dpaa2_q->q_storage[lcore_id].dq_storage[0];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+ qbman_pull_desc_set_numframes(&pulldesc, dpaa2_dqrr_size);
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG("VDQ command is not issued.QBMAN is busy\n");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ /* Check if the previously issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ num_pulled = 0;
+ pending = 1;
+ do {
+ /* Loop until the dq_storage is updated with
+ * a new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+
+ /* Check whether the last pull command has expired,
+ * and set the condition for loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status &
+ QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+ v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ hw_annot_addr = (void *)((size_t)v_addr + DPAA2_FD_PTA_SIZE);
+ fas = hw_annot_addr;
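+ /* The frame annotation status word records why the frame was
+ * delivered to the error queue.
+ */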
+
+ DPAA2_PMD_ERR("\n\n[%d] error packet on port[%d]:"
+ " fd_off: %d, fd_err: %x, fas_status: %x",
+ rte_lcore_id(), eth_data->port_id,
+ DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ERR(fd),
+ fas->status);
+ rte_hexdump(stderr, "Error packet", v_addr,
+ DPAA2_GET_FD_OFFSET(fd) + DPAA2_GET_FD_LEN(fd));
+
+ dq_storage++;
+ num_rx++;
+ num_pulled++;
+ } while (pending);
+
+ dpaa2_q->err_pkts += num_rx;
+}
+
/* This function assumes that the caller will keep the same value for nb_pkts
 * across calls per queue; if that is not the case, better use the non-prefetch
 * version of the Rx call.
struct qbman_pull_desc pulldesc;
struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
-#if defined(RTE_LIBRTE_IEEE1588)
struct dpaa2_dev_priv *priv = eth_data->dev_private;
-#endif
+
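+ /* With the error queue enabled, errored frames are delivered to a
+ * separate FQ; drain and log them before the regular Rx pull.
+ */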
+ if (unlikely(dpaa2_enable_err_queue))
+ dump_err_pkts(priv->rx_err_vq);
if (unlikely(!DPAA2_PER_LCORE_ETHRX_DPIO)) {
ret = dpaa2_affine_qbman_ethrx_swp();
else
bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
- priv->rx_timestamp = bufs[num_rx]->timestamp;
+ priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
#endif
if (eth_data->dev_conf.rxmode.offloads &
ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
dqrr_index = qbman_get_dqrr_idx(dq);
- ev->mbuf->seqn = dqrr_index + 1;
+ *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
DPAA2_PER_LCORE_DQRR_SIZE++;
DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
ev->mbuf = eth_fd_to_mbuf(fd, rxq->eth_data->port_id);
- ev->mbuf->seqn = DPAA2_ENQUEUE_FLAG_ORP;
- ev->mbuf->seqn |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
- ev->mbuf->seqn |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
+ *dpaa2_seqn(ev->mbuf) = DPAA2_ENQUEUE_FLAG_ORP;
+ *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_odpid(dq) << DPAA2_EQCR_OPRID_SHIFT;
+ *dpaa2_seqn(ev->mbuf) |= qbman_result_DQ_seqnum(dq) << DPAA2_EQCR_SEQNUM_SHIFT;
qbman_swp_dqrr_consume(swp, dq);
}
const struct qbman_fd *fd;
struct qbman_pull_desc pulldesc;
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+ struct dpaa2_dev_priv *priv = eth_data->dev_private;
+
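+ /* Drain the error queue first, as in the prefetch Rx path above. */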
+ if (unlikely(dpaa2_enable_err_queue))
+ dump_err_pkts(priv->rx_err_vq);
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
uint32_t flags[MAX_TX_RING_SLOTS] = {0};
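+ /* Remember the start of the burst: mbufs carrying external buffers
+ * must be freed by the driver after enqueue, since hardware will
+ * not free them.
+ */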
+ struct rte_mbuf **orig_bufs = bufs;
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
ret = dpaa2_affine_qbman_swp();
dpaa2_eqcr_size : nb_pkts;
for (loop = 0; loop < frames_to_send; loop++) {
- if ((*bufs)->seqn) {
- uint8_t dqrr_index = (*bufs)->seqn - 1;
+ if (*dpaa2_seqn(*bufs)) {
+ uint8_t dqrr_index = *dpaa2_seqn(*bufs) - 1;
flags[loop] = QBMAN_ENQUEUE_FLAG_DCA |
dqrr_index;
DPAA2_PER_LCORE_DQRR_SIZE--;
DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
- (*bufs)->seqn = DPAA2_INVALID_MBUF_SEQN;
+ *dpaa2_seqn(*bufs) = DPAA2_INVALID_MBUF_SEQN;
}
if (likely(RTE_MBUF_DIRECT(*bufs))) {
mi = rte_mbuf_from_indirect(*bufs);
mp = mi->pool;
}
+
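+ /* External-buffer mbufs are not from a hardware pool: build the FD
+ * with a don't-care bpid (the FD is marked invalid inside the
+ * conversion helpers) and leave the mbuf to be freed after the
+ * enqueue loop.
+ */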
+ if (unlikely(RTE_MBUF_HAS_EXTBUF(*bufs))) {
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ mp, 0))
+ goto send_n_return;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], 0);
+ }
+ bufs++;
+#ifdef RTE_LIBRTE_IEEE1588
+ enable_tx_tstamp(&fd_arr[loop]);
+#endif
+ continue;
+ }
+
/* Not a frame allocated from a hw_pkt pool */
if (unlikely(!mp || !priv->bp_list)) {
DPAA2_PMD_ERR("Err: No buffer pool attached");
bpid = mempool_to_bpid(mp);
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
- &fd_arr[loop], bpid))
+ &fd_arr[loop],
+ mp, bpid))
goto send_n_return;
} else {
eth_mbuf_to_fd(*bufs,
nb_pkts -= loop;
}
dpaa2_q->tx_pkts += num_tx;
+
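+ /* Hardware does not free external buffers; release any extbuf
+ * mbufs from the transmitted burst now.
+ */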
+ loop = 0;
+ while (loop < num_tx) {
+ if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
+ rte_pktmbuf_free(*orig_bufs);
+ orig_bufs++;
+ loop++;
+ }
+
return num_tx;
send_n_return:
}
skip_tx:
dpaa2_q->tx_pkts += num_tx;
+
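+ /* Mirror the extbuf cleanup on the skip_tx path. */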
+ loop = 0;
+ while (loop < num_tx) {
+ if (unlikely(RTE_MBUF_HAS_EXTBUF(*orig_bufs)))
+ rte_pktmbuf_free(*orig_bufs);
+ orig_bufs++;
+ loop++;
+ }
+
return num_tx;
}
qbman_eq_desc_set_fq(eqdesc, dpaa2_q->fqid);
- if (m->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
- orpid = (m->seqn & DPAA2_EQCR_OPRID_MASK) >>
+ if (*dpaa2_seqn(m) & DPAA2_ENQUEUE_FLAG_ORP) {
+ orpid = (*dpaa2_seqn(m) & DPAA2_EQCR_OPRID_MASK) >>
DPAA2_EQCR_OPRID_SHIFT;
- seqnum = (m->seqn & DPAA2_EQCR_SEQNUM_MASK) >>
+ seqnum = (*dpaa2_seqn(m) & DPAA2_EQCR_SEQNUM_MASK) >>
DPAA2_EQCR_SEQNUM_SHIFT;
if (!priv->en_loose_ordered) {
qbman_eq_desc_set_orp(eqdesc, 0, orpid, seqnum, 0);
}
} else {
- dq_idx = m->seqn - 1;
+ dq_idx = *dpaa2_seqn(m) - 1;
qbman_eq_desc_set_dca(eqdesc, 1, dq_idx, 0);
DPAA2_PER_LCORE_DQRR_SIZE--;
DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dq_idx);
}
- m->seqn = DPAA2_INVALID_MBUF_SEQN;
+ *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
/* Callback to handle sending ordered packets through WRIOP based interface */
dpaa2_eqcr_size : nb_pkts;
if (!priv->en_loose_ordered) {
- if ((*bufs)->seqn & DPAA2_ENQUEUE_FLAG_ORP) {
+ if (*dpaa2_seqn(*bufs) & DPAA2_ENQUEUE_FLAG_ORP) {
num_free_eq_desc = dpaa2_free_eq_descriptors();
if (num_free_eq_desc < frames_to_send)
frames_to_send = num_free_eq_desc;
/*Prepare enqueue descriptor*/
qbman_eq_desc_clear(&eqdesc[loop]);
- if ((*bufs)->seqn) {
+ if (*dpaa2_seqn(*bufs)) {
/* Use only queue 0 for Tx in case of atomic/
* ordered packets as packets can get unordered
* when being transmitted out from the interface
if (unlikely((*bufs)->nb_segs > 1)) {
if (eth_mbuf_to_sg_fd(*bufs,
&fd_arr[loop],
+ mp,
bpid))
goto send_n_return;
} else {