* Copyright 2019 NXP
*/
+#include <sys/epoll.h>
#include <rte_kvargs.h>
#include <rte_ethdev_vdev.h>
#include <rte_bus_vdev.h>
return 0;
}
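+/*
+ * pfe_recv_pkts_on_intr
+ * Interrupt-backed Rx burst: poll the HIF Rx ring once and, if neither
+ * the ring nor the client queue yielded anything, re-arm the HIF Rx
+ * interrupt and sleep in epoll_wait() for up to 1 ms instead of
+ * busy-polling an idle ring.
+ */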
+static uint16_t
+pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct epoll_event epoll_ev;
+ int ticks = 1; /* epoll timeout, in milliseconds */
+ int ret;
+ int have_something, work_done;
+
+#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
+
+ /* TODO: can we remove this cleanup from here? */
+ pfe_tx_do_cleanup(priv->pfe);
+ have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
+ work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
+ rx_pkts, nb_pkts);
+
+ if (!have_something || !work_done) {
+ writel(RESET_STATUS, HIF_INT_SRC);
+ writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
+ ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
+ if (ret < 0 && errno != EINTR)
+ PFE_PMD_ERR("epoll_wait failed with %d", errno);
+ }
+
+ return work_done;
+}
+
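+/*
+ * pfe_recv_pkts
+ * Poll-mode Rx burst: process the HIF Rx ring, then dequeue up to
+ * nb_pkts mbufs from this client Rx queue.
+ */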
+static uint16_t
+pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_rx_queue *queue = rxq;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_mempool *pool;
+
+ /* TODO: can we remove this cleanup from here? */
+ pfe_tx_do_cleanup(priv->pfe);
+ pfe_hif_rx_process(priv->pfe, nb_pkts);
+ pool = priv->pfe->hif.shm->pool;
+
+ return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
+}
+
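+/*
+ * pfe_xmit_pkts
+ * Tx burst. A multi-segment mbuf maps onto a chain of HIF buffers: the
+ * first segment is flagged HIF_FIRST_BUFFER, intermediate segments carry
+ * no flags, and the last segment is flagged HIF_LAST_BUFFER |
+ * HIF_DATA_VALID, which triggers the descriptor flush in hif_xmit_pkt().
+ */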
+static uint16_t
+pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct hif_client_tx_queue *queue = tx_queue;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_eth_stats *stats = &priv->stats;
+ int i;
+
+ for (i = 0; i < nb_pkts; i++) {
+ if (tx_pkts[i]->nb_segs > 1) {
+ struct rte_mbuf *mbuf;
+ int j;
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
+ tx_pkts[i]);
+
+ mbuf = tx_pkts[i]->next;
+ for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, 0x0, mbuf);
+ mbuf = mbuf->next;
+ }
+
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(mbuf),
+ mbuf->buf_addr + mbuf->data_off,
+ mbuf->data_len,
+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+ mbuf);
+ } else {
+ hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+ (void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+ tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+ tx_pkts[i]->pkt_len, 0 /*ctrl*/,
+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+ HIF_DATA_VALID,
+ tx_pkts[i]);
+ }
+ stats->obytes += tx_pkts[i]->pkt_len;
+ hif_tx_dma_start();
+ }
+ stats->opackets += nb_pkts;
+ pfe_tx_do_cleanup(priv->pfe);
+
+ return nb_pkts;
+}
+
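+/*
+ * Dummy burst functions, installed while the port is stopped so that any
+ * stray polls from the application are harmless no-ops.
+ */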
+static uint16_t
+pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+static uint16_t
+pfe_dummy_recv_pkts(__rte_unused void *rxq,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
static int
pfe_eth_open(struct rte_eth_dev *dev)
{
" failed", client->id);
goto err0;
}
+ } else {
+ /* Drain and free any packets already queued for this client */
+ int ret = 0;
+ struct rte_mbuf *rx_pkts[32];
+ /* TODO: multiqueue support */
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool, rx_pkts, 32);
+ while (ret) {
+ int i;
+ for (i = 0; i < ret; i++)
+ rte_pktmbuf_free(rx_pkts[i]);
+ ret = hif_lib_receive_pkt(&client->rx_q[0],
+ hif_shm->pool,
+ rx_pkts, 32);
+ }
}
} else {
/* Register client driver with HIF */
}
}
rc = pfe_eth_start(priv);
+ dev->rx_pkt_burst = &pfe_recv_pkts;
+ dev->tx_pkt_burst = &pfe_xmit_pkts;
+ /*
+ * Poll-mode bursts are the default; setting the PFE_INTR_SUPPORT
+ * environment variable (e.g. "export PFE_INTR_SUPPORT=1") before
+ * starting the application switches Rx to the interrupt-backed
+ * path.
+ */
+ if (getenv("PFE_INTR_SUPPORT")) {
+ dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
+ PFE_PMD_INFO("PFE interrupt mode enabled");
+ }
+
err0:
return rc;
gemac_disable(priv->EMAC_baseaddr);
gpi_disable(priv->GPI_baseaddr);
+
+ dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
+ dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
}
static void
rte_free(hif->descr_baseaddr_v);
}
+/*
+ * pfe_hif_release_buffers
+ * Return outstanding Rx ring buffers to the shared buffer pool and clear
+ * all Rx descriptors.
+ */
+static void
+pfe_hif_release_buffers(struct pfe_hif *hif)
+{
+ struct hif_desc *desc;
+ uint32_t i = 0;
+ struct rte_mbuf *mbuf;
+ struct rte_pktmbuf_pool_private *mb_priv;
+
+ hif->rx_base = hif->descr_baseaddr_v;
+
+ /* Free Rx buffers */
+ desc = hif->rx_base;
+ mb_priv = rte_mempool_get_priv(hif->shm->pool);
+ for (i = 0; i < hif->rx_ring_size; i++) {
+ if (readl(&desc->data)) {
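+ /*
+ * rx_buf_vaddr[] points PFE_PKT_HEADER_SZ before the
+ * packet data; step forward over that header, then back
+ * over the headroom, mempool private area and mbuf
+ * header to recover the rte_mbuf address.
+ */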
+ if (i < hif->shm->rx_buf_pool_cnt &&
+ !hif->shm->rx_buf_pool[i]) {
+ mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
+ - sizeof(struct rte_mbuf)
+ - RTE_PKTMBUF_HEADROOM
+ - mb_priv->mbuf_priv_size;
+ hif->shm->rx_buf_pool[i] = mbuf;
+ }
+ }
+ writel(0, &desc->data);
+ writel(0, &desc->status);
+ writel(0, &desc->ctrl);
+ desc++;
+ }
+}
+
/*
* pfe_hif_init_buffers
* This function initializes the HIF Rx/Tx ring descriptors and
rte_spinlock_unlock(&hif->tx_lock);
}
+/*
+ * client_put_rxpacket
+ * Hand a received buffer to the given client Rx queue and allocate a
+ * replacement mbuf from the pool; returns the new mbuf, or NULL if the
+ * queue is full or allocation fails.
+ */
+static struct rte_mbuf *
+client_put_rxpacket(struct hif_rx_queue *queue,
+ void *pkt, u32 len,
+ u32 flags, u32 client_ctrl,
+ struct rte_mempool *pool,
+ u32 *rem_len)
+{
+ struct rx_queue_desc *desc = queue->base + queue->write_idx;
+ struct rte_mbuf *mbuf = NULL;
+
+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
+ mbuf = rte_pktmbuf_alloc(pool);
+ if (unlikely(!mbuf)) {
+ PFE_PMD_WARN("Buffer allocation failure");
+ return NULL;
+ }
+
+ desc->data = pkt;
+ desc->client_ctrl = client_ctrl;
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+ writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
+ queue->write_idx = (queue->write_idx + 1)
+ & (queue->size - 1);
+
+ *rem_len = mbuf->buf_len;
+ }
+
+ return mbuf;
+}
+
+/*
+ * pfe_hif_rx_process
+ * Process the HIF Rx ring: dequeue up to 'budget' packets and hand each
+ * one to the Rx queue of the client it is addressed to.
+ */
+int
+pfe_hif_rx_process(struct pfe *pfe, int budget)
+{
+ struct hif_desc *desc;
+ struct hif_hdr *pkt_hdr;
+ struct __hif_hdr hif_hdr;
+ void *free_buf;
+ int rtc, len, rx_processed = 0;
+ struct __hif_desc local_desc;
+ int flags = 0, wait_for_last = 0, retry = 0;
+ unsigned int buf_size = 0;
+ struct rte_mbuf *mbuf = NULL;
+ struct pfe_hif *hif = &pfe->hif;
+
+ rte_spinlock_lock(&hif->lock);
+
+ rtc = hif->rxtoclean_index;
+
+ while (rx_processed < budget) {
+ desc = hif->rx_base + rtc;
+
+ __memcpy12(&local_desc, desc);
+
+ /* Descriptor still owned by hardware: no completed packet */
+ if (local_desc.ctrl & BD_CTRL_DESC_EN) {
+ if (unlikely(wait_for_last))
+ continue;
+ else
+ break;
+ }
+
+ len = BD_BUF_LEN(local_desc.ctrl);
+ pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];
+
+ /* First buffer of a packet: parse its HIF header */
+ if (!hif->started) {
+ hif->started = 1;
+
+ __memcpy8(&hif_hdr, pkt_hdr);
+
+ hif->qno = hif_hdr.hdr.q_num;
+ hif->client_id = hif_hdr.hdr.client_id;
+ hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
+ hif_hdr.hdr.client_ctrl;
+ flags = CL_DESC_FIRST;
+
+ } else {
+ flags = 0;
+ }
+
+ if (local_desc.ctrl & BD_CTRL_LIFM) {
+ flags |= CL_DESC_LAST;
+ wait_for_last = 0;
+ } else {
+ wait_for_last = 1;
+ }
+
+ /* Drop the packet if the client id is invalid or not registered */
+ if (hif->client_id >= HIF_CLIENTS_MAX ||
+ !(test_bit(hif->client_id,
+ &hif->shm->g_client_status[0]))) {
+ PFE_PMD_INFO("packet with invalid client id %d qnum %d",
+ hif->client_id, hif->qno);
+
+ free_buf = hif->rx_buf_addr[rtc];
+
+ goto pkt_drop;
+ }
+
+ /* Fall back to queue 0 on an invalid queue number */
+ if (hif->client[hif->client_id].rx_qn <= hif->qno) {
+ PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
+ hif->qno);
+ hif->qno = 0;
+ }
+
+retry:
+ mbuf =
+ client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+ (void *)pkt_hdr, len, flags,
+ hif->client_ctrl, hif->shm->pool,
+ &buf_size);
+
+ if (unlikely(!mbuf)) {
+ if (!retry) {
+ pfe_tx_do_cleanup(pfe);
+ retry = 1;
+ goto retry;
+ }
+ rx_processed = budget;
+
+ if (flags & CL_DESC_FIRST)
+ hif->started = 0;
+
+ PFE_DP_LOG(DEBUG, "No buffers");
+ break;
+ }
+
+ retry = 0;
+
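+ /*
+ * Step the new buffer's bus address back by PFE_PKT_HEADER_SZ so
+ * the hardware has room to prepend the HIF packet header.
+ */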
+ free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
+ free_buf = free_buf - PFE_PKT_HEADER_SZ;
+
+ /* Refill the descriptor with the fresh buffer */
+ hif->rx_buf_addr[rtc] = free_buf;
+ hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
+ mbuf->data_off - PFE_PKT_HEADER_SZ);
+ hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;
+
+pkt_drop:
+ writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+ BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
+ &desc->ctrl);
+
+ rtc = (rtc + 1) & (hif->rx_ring_size - 1);
+
+ if (local_desc.ctrl & BD_CTRL_LIFM) {
+ if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
+ rx_processed++;
+
+ hif->started = 0;
+ }
+ }
+
+ hif->rxtoclean_index = rtc;
+ rte_spinlock_unlock(&hif->lock);
+
+ /* We may have made progress; restart Rx DMA in case it stopped */
+ hif_rx_dma_start();
+
+ return rx_processed;
+}
+
+/*
+ * client_ack_txpacket
+ * Acknowledge a transmitted packet in the given client Tx queue by
+ * clearing the ownership bit in its descriptor.
+ */
+static int
+client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
+ unsigned int q_no)
+{
+ struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
+ struct tx_queue_desc *desc = queue->base + queue->ack_idx;
+
+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
+ writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
+ queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
+
+ return 0;
+
+ } else {
+ /* This should not happen */
+ PFE_PMD_ERR("%d %d %d %d %d %p %d",
+ hif->txtosend, hif->txtoclean, hif->txavail,
+ client_id, q_no, queue, queue->ack_idx);
+ return 1;
+ }
+}
+
+static void
+__hif_tx_done_process(struct pfe *pfe, int count)
+{
+ struct hif_desc *desc;
+ struct hif_desc_sw *desc_sw;
+ unsigned int ttc, tx_avl;
+ int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
+ struct pfe_hif *hif = &pfe->hif;
+
+ ttc = hif->txtoclean;
+ tx_avl = hif->txavail;
+
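+ /*
+ * Walk completed Tx descriptors (bounded by 'count'), return each
+ * one to its owning client queue and count per-client completions
+ * for the TXDONE indications below.
+ */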
+ while ((tx_avl < hif->tx_ring_size) && count--) {
+ desc = hif->tx_base + ttc;
+
+ if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
+ break;
+
+ desc_sw = &hif->tx_sw_queue[ttc];
+
+ if (desc_sw->client_id >= HIF_CLIENTS_MAX)
+ PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);
+
+ pkts_done[desc_sw->client_id]++;
+
+ client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
+
+ ttc = (ttc + 1) & (hif->tx_ring_size - 1);
+ tx_avl++;
+ }
+
+ if (pkts_done[0])
+ hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
+ 0);
+ if (pkts_done[1])
+ hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
+ 0);
+ hif->txtoclean = ttc;
+ hif->txavail = tx_avl;
+}
+
+static inline void
+hif_tx_done_process(struct pfe *pfe, int count)
+{
+ struct pfe_hif *hif = &pfe->hif;
+ rte_spinlock_lock(&hif->tx_lock);
+ __hif_tx_done_process(pfe, count);
+ rte_spinlock_unlock(&hif->tx_lock);
+}
+
+void
+pfe_tx_do_cleanup(struct pfe *pfe)
+{
+ hif_tx_done_process(pfe, HIF_TX_DESC_NT);
+}
+
+/*
+ * hif_xmit_pkt
+ * Queue one packet buffer on the HIF Tx ring.
+ */
+void
+hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+ q_no, void *data, u32 len, unsigned int flags)
+{
+ struct hif_desc *desc;
+ struct hif_desc_sw *desc_sw;
+
+ desc = hif->tx_base + hif->txtosend;
+ desc_sw = &hif->tx_sw_queue[hif->txtosend];
+
+ desc_sw->len = len;
+ desc_sw->client_id = client_id;
+ desc_sw->q_no = q_no;
+ desc_sw->flags = flags;
+
+ writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
+
+ hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
+ hif->txavail--;
+
+ if (!((flags & HIF_DATA_VALID) && (flags & HIF_LAST_BUFFER)))
+ goto skip_tx;
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+
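+ /*
+ * A complete packet is now queued: arm every descriptor
+ * accumulated since the last flush, from txtoflush up to
+ * txtosend, so the hardware sees the whole chain at once.
+ */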
+ do {
+ desc_sw = &hif->tx_sw_queue[hif->txtoflush];
+ desc = hif->tx_base + hif->txtoflush;
+
+ if (desc_sw->flags & HIF_LAST_BUFFER) {
+ writel((BD_CTRL_LIFM |
+ BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
+ | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
+ BD_BUF_LEN(desc_sw->len)),
+ &desc->ctrl);
+ } else {
+ writel((BD_CTRL_DESC_EN |
+ BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
+ }
+ hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
+ } while (hif->txtoflush != hif->txtosend);
+
+skip_tx:
+ return;
+}
+
void
hif_process_client_req(struct pfe_hif *hif, int req,
- int data1, __rte_unused int data2)
+ int data1, __rte_unused int data2)
{
unsigned int client_id = data1;
hif_rx_disable();
hif_tx_disable();
+ pfe_hif_release_buffers(hif);
+ pfe_hif_shm_clean(hif->shm);
+
pfe_hif_free_descr(hif);
pfe->hif.setuped = 0;
}
struct rte_device *dev;
};
+void hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+ q_no, void *data, u32 len, unsigned int flags);
void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
data2);
int pfe_hif_init(struct pfe *pfe);
void pfe_hif_exit(struct pfe *pfe);
void pfe_hif_rx_idle(struct pfe_hif *hif);
+int pfe_hif_rx_process(struct pfe *pfe, int budget);
int pfe_hif_init_buffers(struct pfe_hif *hif);
+void pfe_tx_do_cleanup(struct pfe *pfe);
+
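+/*
+ * Fixed-size copy helpers used to snapshot hardware descriptors and HIF
+ * headers in a single call.
+ */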
+#define __memcpy8(dst, src) memcpy(dst, src, 8)
+#define __memcpy12(dst, src) memcpy(dst, src, 12)
+#define __memcpy(dst, src, len) memcpy(dst, src, len)
#endif /* _PFE_HIF_H_ */
return 0;
}
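+/*
+ * Software packet-type parsing, used when the parse results are not taken
+ * from the hardware-written HIF header: classify L2/L3/L4 with
+ * rte_net_get_ptype().
+ */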
+#ifdef RTE_LIBRTE_PFE_SW_PARSE
+static inline void
+pfe_sw_parse_pkt(struct rte_mbuf *mbuf)
+{
+ struct rte_net_hdr_lens hdr_lens;
+
+ mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
+ RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
+ | RTE_PTYPE_L4_MASK);
+ mbuf->l2_len = hdr_lens.l2_len;
+ mbuf->l3_len = hdr_lens.l3_len;
+}
+#endif
+
+/*
+ * hif_lib_receive_pkt
+ * Get up to nb_pkts packets from the specified client Rx queue, chaining
+ * multi-buffer packets, and return each consumed descriptor to the HIF
+ * so it can be refilled.
+ */
+int
+hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
+ struct rte_mempool *pool, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct rx_queue_desc *desc;
+ struct pfe_eth_priv_s *priv = queue->priv;
+ struct rte_pktmbuf_pool_private *mb_priv;
+ struct rte_mbuf *mbuf, *p_mbuf = NULL, *first_mbuf = NULL;
+ struct rte_eth_stats *stats = &priv->stats;
+ int i, wait_for_last = 0;
+#ifndef RTE_LIBRTE_PFE_SW_PARSE
+ struct pfe_parse *parse_res;
+#endif
+
+ for (i = 0; i < nb_pkts;) {
+ do {
+ desc = queue->base + queue->read_idx;
+ if (desc->ctrl & CL_DESC_OWN) {
+ stats->ipackets += i;
+ return i;
+ }
+
+ mb_priv = rte_mempool_get_priv(pool);
+
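+ /*
+ * desc->data points PFE_PKT_HEADER_SZ before the packet
+ * data; step forward over that header, then back over
+ * the headroom, mempool private area and mbuf header to
+ * recover the rte_mbuf address.
+ */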
+ mbuf = desc->data + PFE_PKT_HEADER_SZ
+ - sizeof(struct rte_mbuf)
+ - RTE_PKTMBUF_HEADROOM
+ - mb_priv->mbuf_priv_size;
+ mbuf->next = NULL;
+ if (desc->ctrl & CL_DESC_FIRST) {
+ /* TODO: set 'size' to the length of any private
+ * data present in the descriptor
+ */
+ u16 size = 0;
+ mbuf->pkt_len = CL_DESC_BUF_LEN(desc->ctrl)
+ - PFE_PKT_HEADER_SZ - size;
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = queue->port_id;
+#ifdef RTE_LIBRTE_PFE_SW_PARSE
+ pfe_sw_parse_pkt(mbuf);
+#else
+ parse_res = (struct pfe_parse *)(desc->data +
+ PFE_HIF_SIZE);
+ mbuf->packet_type = parse_res->packet_type;
+#endif
+ mbuf->nb_segs = 1;
+ first_mbuf = mbuf;
+ rx_pkts[i++] = first_mbuf;
+ } else {
+ mbuf->data_len = CL_DESC_BUF_LEN(desc->ctrl);
+ mbuf->data_off = mbuf->data_off -
+ PFE_PKT_HEADER_SZ;
+ first_mbuf->pkt_len += mbuf->data_len;
+ first_mbuf->nb_segs++;
+ p_mbuf->next = mbuf;
+ }
+ stats->ibytes += mbuf->data_len;
+ p_mbuf = mbuf;
+
+ if (desc->ctrl & CL_DESC_LAST)
+ wait_for_last = 0;
+ else
+ wait_for_last = 1;
+ /*
+ * Needed so we don't free a buffer/page
+ * twice on module_exit
+ */
+ desc->data = NULL;
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ rte_wmb();
+
+ desc->ctrl = CL_DESC_OWN;
+ queue->read_idx = (queue->read_idx + 1) &
+ (queue->size - 1);
+ } while (wait_for_last);
+ }
+ stats->ipackets += i;
+ return i;
+}
+
+static inline void
+hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
+ client_id, unsigned int qno,
+ u32 client_ctrl)
+{
+ /*
+ * Optimize the write since the destination may be non-cacheable:
+ * a 4-byte-aligned header is written with a single 32-bit store,
+ * otherwise with two 16-bit stores.
+ */
+ if (!((unsigned long)pkt_hdr & 0x3)) {
+ ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
+ client_id;
+ } else {
+ ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
+ ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
+ }
+}
+
+/* This function puts the given packet in the specified client Tx queue */
+void
+hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
+ void *data, void *data1, unsigned int len,
+ u32 client_ctrl, unsigned int flags, void *client_data)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+ struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+ /*
+ * First buffer: steal PFE_HIF_SIZE bytes of headroom ahead of the
+ * data and write the HIF header there.
+ */
+ if (flags & HIF_FIRST_BUFFER) {
+ data1 -= PFE_HIF_SIZE;
+ data -= PFE_HIF_SIZE;
+ len += PFE_HIF_SIZE;
+
+ hif_hdr_write(data1, client->id, qno, client_ctrl);
+ }
+
+ desc->data = client_data;
+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
+
+ hif_xmit_pkt(&client->pfe->hif, client->id, qno, data, len, flags);
+
+ queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
+
+ queue->tx_pending++;
+}
+
void *
hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
unsigned int *flags, __rte_unused int count)
void pfe_hif_lib_exit(struct pfe *pfe);
int hif_lib_client_register(struct hif_client_s *client);
int hif_lib_client_unregister(struct hif_client_s *client);
+void hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
+ void *data, void *data1, unsigned int len,
+ u32 client_ctrl, unsigned int flags, void *client_data);
void hif_lib_indicate_client(struct hif_client_s *client, int event, int data);
int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
data);
int pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool);
void pfe_hif_shm_clean(struct hif_shm *hif_shm);
+int hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
+ struct rte_mempool *pool,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
#endif /* _PFE_HIF_LIB_H_ */
struct pfe;
+#include <rte_ethdev.h>
+
#include "pfe.h"
#include "pfe_hif.h"
#include "pfe_hif_lib.h"