m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
}
m->hash.rss = fd->simple.flc_hi;
- m->ol_flags |= PKT_RX_RSS_HASH;
+ m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
if (dpaa2_enable_ts[m->port]) {
*dpaa2_timestamp_dynfield(m) = annotation->word2;
annotation->word3, annotation->word4);
#if defined(RTE_LIBRTE_IEEE1588)
- if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
- mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
+ if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
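+ /* Also mark the Rx timestamp as valid; the Rx burst path reads
+ * the timestamp dynfield only when this flag is set.
+ */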
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+ }
#endif
if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
} else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
(VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
- mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
}
if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
L3_IP_N_OPT_PRESENT))
pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
+ L3_PROTO_ESP_PRESENT))
+ pkt_type |= RTE_PTYPE_TUNNEL_ESP;
} else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
L3_IPV6_N_PRESENT)) {
if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
L3_IP_N_OPT_PRESENT))
pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
+ if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
+ L3_PROTO_ESP_PRESENT))
+ pkt_type |= RTE_PTYPE_TUNNEL_ESP;
} else {
goto parse_done;
}
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
L3_IP_1_MORE_FRAGMENT |
annotation->word4);
if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
if (dpaa2_enable_ts[mbuf->port]) {
*dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
rte_mbuf_refcnt_set(first_seg, 1);
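+ /* With mempool debug enabled, validate this buffer's mempool cookies;
+ * the final argument 1 records a free -> allocated transition for a
+ * buffer handed back by hardware.
+ */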
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
+ (void **)&first_seg, 1, 1);
+#endif
cur_seg = first_seg;
while (!DPAA2_SG_IS_FINAL(sge)) {
sge = &sgt[i++];
next_seg->data_len = sge->length & 0x1FFFF;
first_seg->nb_segs += 1;
rte_mbuf_refcnt_set(next_seg, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
+ (void **)&next_seg, 1, 1);
+#endif
cur_seg->next = next_seg;
next_seg->next = NULL;
cur_seg = next_seg;
temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
rte_mbuf_refcnt_set(temp, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
+ (void **)&temp, 1, 1);
+#endif
rte_pktmbuf_free_seg(temp);
return (void *)first_seg;
mbuf->port = port_id;
mbuf->next = NULL;
rte_mbuf_refcnt_set(mbuf, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
+ (void **)&mbuf, 1, 1);
+#endif
/* Parse the packet */
/* parse results for LX2 are there in FRC field of FD.
rte_mbuf_refcnt_update(temp, -1);
} else {
DPAA2_SET_ONLY_FD_BPID(fd, bpid);
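+ /* The buffer is being handed to hardware; with mempool debug enabled,
+ * record the allocated -> free transition (final argument 0).
+ */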
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
+ (void **)&temp, 1, 0);
+#endif
}
DPAA2_SET_FD_OFFSET(fd, offset);
} else {
}
DPAA2_SET_ONLY_FD_BPID(fd, bpid);
DPAA2_SET_FD_OFFSET(fd, temp->data_off);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
+ (void **)&temp, 1, 0);
+#endif
}
DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
} else {
DPAA2_SET_FLE_BPID(sge,
mempool_to_bpid(cur_seg->pool));
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
+ (void **)&cur_seg, 1, 0);
+#endif
}
}
cur_seg = cur_seg->next;
DPAA2_SET_FD_IVP(fd);
rte_mbuf_refcnt_update(mbuf, -1);
}
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ else
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
+ (void **)&mbuf, 1, 0);
+#endif
} else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
DPAA2_SET_FD_IVP(fd);
} else {
DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
+ (void **)&m, 1, 0);
+#endif
DPAA2_PMD_DP_DEBUG(
"mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
" meta: %d, off: %d, len: %d\n",
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
else
bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
#if defined(RTE_LIBRTE_IEEE1588)
- priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
#endif
if (eth_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP)
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
rte_vlan_strip(bufs[num_rx]);
dq_storage++;
bufs[num_rx] = eth_fd_to_mbuf(fd,
eth_data->port_id);
+#if defined(RTE_LIBRTE_IEEE1588)
+ if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
+ priv->rx_timestamp =
+ *dpaa2_timestamp_dynfield(bufs[num_rx]);
+ }
+#endif
+
if (eth_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_STRIP) {
+ RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
rte_vlan_strip(bufs[num_rx]);
}
struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
struct dpaa2_dev_priv *priv = eth_data->dev_private;
struct dpaa2_annot_hdr *annotation;
+ void *v_addr;
+ struct rte_mbuf *mbuf;
#endif
if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
num_tx_conf++;
num_pulled++;
#if defined(RTE_LIBRTE_IEEE1588)
- annotation = (struct dpaa2_annot_hdr *)((size_t)
- DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
- DPAA2_FD_PTA_SIZE);
- priv->tx_timestamp = annotation->word2;
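+ /* Recover the mbuf backing this FD so we can check whether the
+ * frame actually requested a Tx timestamp.
+ */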
+ v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+ mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
+ rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+ if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+ annotation = (struct dpaa2_annot_hdr *)((size_t)
+ v_addr + DPAA2_FD_PTA_SIZE);
+ priv->tx_timestamp = annotation->word2;
+ }
#endif
} while (pending);
* corresponding to last packet transmitted for reading
* the timestamp
*/
- priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
- dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+ priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
+ dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+ priv->tx_timestamp = 0;
+ }
#endif
/*Prepare enqueue descriptor*/
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely(((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT) ||
+ & RTE_MBUF_F_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
}
DPAA2_MBUF_TO_CONTIG_FD((*bufs),
&fd_arr[loop], mempool_to_bpid(mp));
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+ rte_mempool_check_cookies
+ (rte_mempool_from_obj((void *)*bufs),
+ (void **)bufs, 1, 0);
+#endif
bufs++;
#ifdef RTE_LIBRTE_IEEE1588
enable_tx_tstamp(&fd_arr[loop]);
goto send_n_return;
}
- if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+ if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
(eth_data->dev_conf.txmode.offloads
- & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+ & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
int ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
}
void
-dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
+dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
+ __rte_unused struct dpaa2_queue *dpaa2_q)
{
struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
struct qbman_fd *fd;
*dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
}
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+ struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Transmit each frame to its own Tx queue:
+ * queue[i] is the destination for bufs[i].
+ */
+ uint32_t loop, retry_count;
+ int32_t ret;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t frames_to_send;
+ struct rte_mempool *mp;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+ struct qbman_swp *swp;
+ uint16_t bpid;
+ struct rte_mbuf *mi;
+ struct rte_eth_dev_data *eth_data;
+ struct dpaa2_dev_priv *priv;
+ struct dpaa2_queue *order_sendq;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ for (loop = 0; loop < nb_pkts; loop++) {
+ dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+ eth_data = dpaa2_q[loop]->eth_data;
+ priv = eth_data->dev_private;
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+ order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+ dpaa2_set_enqueue_descriptor(order_sendq,
+ (*bufs),
+ &eqdesc[loop]);
+ } else {
+ qbman_eq_desc_set_no_orp(&eqdesc[loop],
+ DPAA2_EQ_RESP_ERR_FQ);
+ qbman_eq_desc_set_fq(&eqdesc[loop],
+ dpaa2_q[loop]->fqid);
+ }
+
+ retry_count = 0;
+ while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+ retry_count++;
+ /* Retry for some time before giving up */
+ if (retry_count > CONG_RETRY_COUNT)
+ goto send_frames;
+ }
+
+ if (likely(RTE_MBUF_DIRECT(*bufs))) {
+ mp = (*bufs)->pool;
+ /* Fast path: a single-segment mbuf with refcnt 1 from this
+ * interface's own pool; build the FD directly.
+ */
+ if (likely(mp && mp->ops_index ==
+ priv->bp_list->dpaa2_ops_index &&
+ (*bufs)->nb_segs == 1 &&
+ rte_mbuf_refcnt_read((*bufs)) == 1)) {
+ if (unlikely((*bufs)->ol_flags
+ & RTE_MBUF_F_TX_VLAN)) {
+ ret = rte_vlan_insert(bufs);
+ if (ret)
+ goto send_frames;
+ }
+ DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+ &fd_arr[loop],
+ mempool_to_bpid(mp));
+ bufs++;
+ continue;
+ }
+ } else {
+ mi = rte_mbuf_from_indirect(*bufs);
+ mp = mi->pool;
+ }
+ /* Frame was not allocated from a hardware-backed packet pool */
+ if (unlikely(!mp || !priv->bp_list)) {
+ DPAA2_PMD_ERR("Err: No buffer pool attached");
+ goto send_frames;
+ }
+
+ if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+ DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+ /* alloc should be from the default buffer pool
+ * attached to this interface
+ */
+ bpid = priv->bp_list->buf_pool.bpid;
+
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ DPAA2_PMD_ERR(
+ "S/G not supp for non hw offload buffer");
+ goto send_frames;
+ }
+ if (eth_copy_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid)) {
+ goto send_frames;
+ }
+ /* free the original packet */
+ rte_pktmbuf_free(*bufs);
+ } else {
+ bpid = mempool_to_bpid(mp);
+ if (unlikely((*bufs)->nb_segs > 1)) {
+ if (eth_mbuf_to_sg_fd(*bufs,
+ &fd_arr[loop],
+ mp,
+ bpid))
+ goto send_frames;
+ } else {
+ eth_mbuf_to_fd(*bufs,
+ &fd_arr[loop], bpid);
+ }
+ }
+
+ bufs++;
+ }
+
+send_frames:
+ frames_to_send = loop;
+ loop = 0;
+ /* retry_count may hold a leftover value from the congestion loop above */
+ retry_count = 0;
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+ &fd_arr[loop],
+ frames_to_send - loop);
+ if (likely(ret > 0)) {
+ loop += ret;
+ } else {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ break;
+ }
+ }
+
+ return loop;
+}
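+/* Usage sketch (illustrative only; txq_id_for() is a hypothetical helper):
+ * callers gather one dpaa2_queue pointer per mbuf so queue[i] pairs with
+ * bufs[i]:
+ *
+ *	void *txqs[MAX_TX_RING_SLOTS];
+ *	for (i = 0; i < n; i++)
+ *		txqs[i] = eth_dev->data->tx_queues[txq_id_for(pkts[i])];
+ *	sent = dpaa2_dev_tx_multi_txq_ordered(txqs, pkts, n);
+ */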
+
/* Callback to handle sending ordered packets through WRIOP based interface */
uint16_t
dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
if (*dpaa2_seqn(*bufs)) {
/* Use only queue 0 for Tx in case of atomic/
* ordered packets as packets can get unordered
- * when being tranmitted out from the interface
+ * when being transmitted out from the interface
*/
dpaa2_set_enqueue_descriptor(order_sendq,
(*bufs),
(*bufs)->nb_segs == 1 &&
rte_mbuf_refcnt_read((*bufs)) == 1)) {
if (unlikely((*bufs)->ol_flags
- & PKT_TX_VLAN_PKT)) {
+ & RTE_MBUF_F_TX_VLAN)) {
ret = rte_vlan_insert(bufs);
if (ret)
goto send_n_return;
return num_tx;
}
-/**
- * Dummy DPDK callback for TX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_txq
- * Generic pointer to TX queue structure.
- * @param[in] pkts
- * Packets to transmit.
- * @param pkts_n
- * Number of packets in array.
- *
- * @return
- * Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-{
- (void)queue;
- (void)bufs;
- (void)nb_pkts;
- return 0;
-}
-
#if defined(RTE_TOOLCHAIN_GCC)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];