+/**
+ * DPDK callback for receive.
+ *
+ * @param rxq
+ * Generic pointer to the receive queue.
+ * @param rx_pkts
+ * Array to store received packets.
+ * @param nb_pkts
+ * Maximum number of packets in array.
+ *
+ * @return
+ * Number of packets successfully received.
+ */
+static uint16_t
+mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+ struct mrvl_rxq *q = rxq;
+ struct pp2_ppio_desc descs[nb_pkts];
+ struct pp2_bpool *bpool;
+ int i, ret, rx_done = 0;
+ int num;
+ unsigned int core_id = rte_lcore_id();
+
+ if (unlikely(!q->priv->ppio))
+ return 0;
+
+ bpool = q->priv->bpool;
+
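+	/* pp2_ppio_recv() takes nb_pkts as the maximum burst size and
+	 * updates it in place to the number of descriptors actually
+	 * received.
+	 */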
+ ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
+ q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
+ if (unlikely(ret < 0)) {
+ RTE_LOG(ERR, PMD, "Failed to receive packets\n");
+ return 0;
+ }
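+	/* Each received descriptor consumed one buffer from the pool;
+	 * update the per-core pool size accounting accordingly.
+	 */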
+ mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf;
+ enum pp2_inq_desc_status status;
+ uint64_t addr;
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct pp2_ppio_desc *pref_desc;
+ u64 pref_addr;
+
+ pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ pref_addr = cookie_addr_high |
+ pp2_ppio_inq_desc_get_cookie(pref_desc);
+ rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
+ rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
+ }
+
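+		/* The descriptor cookie carries the low bits of the mbuf
+		 * virtual address; OR in the constant high bits to recover
+		 * the full pointer.
+		 */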
+ addr = cookie_addr_high |
+ pp2_ppio_inq_desc_get_cookie(&descs[i]);
+ mbuf = (struct rte_mbuf *)addr;
+ rte_pktmbuf_reset(mbuf);
+
+		/* Drop packet in case of MAC, overrun or resource error */
+ status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
+ if (unlikely(status != PP2_DESC_ERR_OK)) {
+ struct pp2_buff_inf binf = {
+ .addr = rte_mbuf_data_dma_addr_default(mbuf),
+ .cookie = (pp2_cookie_t)(uint64_t)mbuf,
+ };
+
+ pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
+ mrvl_port_bpool_size
+ [bpool->pp2_id][bpool->id][core_id]++;
+ continue;
+ }
+
+ mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
+ mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
+ mbuf->data_len = mbuf->pkt_len;
+ mbuf->port = q->port_id;
+
+ rx_pkts[rx_done++] = mbuf;
+ }
+
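+	/* Opportunistically keep the pool near its initial size: refill it
+	 * when it runs low (or when nothing was received while the pool is
+	 * undersized) and drain it when it overflows. The trylock avoids
+	 * stalling the datapath on contention.
+	 */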
+ if (rte_spinlock_trylock(&q->priv->lock) == 1) {
+ num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
+
+ if (unlikely(num <= q->priv->bpool_min_size ||
+ (!rx_done && num < q->priv->bpool_init_size))) {
+ ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
+ if (ret)
+ RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
+ } else if (unlikely(num > q->priv->bpool_max_size)) {
+ int i;
+ int pkt_to_remove = num - q->priv->bpool_init_size;
+ struct rte_mbuf *mbuf;
+ struct pp2_buff_inf buff;
+
+ RTE_LOG(DEBUG, PMD,
+ "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
+ bpool->pp2_id, q->priv->ppio->port_id,
+ bpool->id, pkt_to_remove, num,
+ q->priv->bpool_init_size);
+
+ for (i = 0; i < pkt_to_remove; i++) {
+ pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | buff.cookie);
+ rte_pktmbuf_free(mbuf);
+ }
+ mrvl_port_bpool_size
+ [bpool->pp2_id][bpool->id][core_id] -=
+ pkt_to_remove;
+ }
+ rte_spinlock_unlock(&q->priv->lock);
+ }
+
+ return rx_done;
+}
+
+/**
+ * Release already-sent buffers to the bpool (buffer pool).
+ *
+ * @param ppio
+ * Pointer to the port structure.
+ * @param hif
+ * Pointer to the MUSDK hardware interface.
+ * @param sq
+ * Pointer to the shadow queue.
+ * @param qid
+ * Queue id number.
+ * @param force
+ *   Force buffer release regardless of the release burst threshold.
+ */
+static inline void
+mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
+ struct mrvl_shadow_txq *sq, int qid, int force)
+{
+ struct buff_release_entry *entry;
+ uint16_t nb_done = 0, num = 0, skip_bufs = 0;
+ int i, core_id = rte_lcore_id();
+
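+	/* Query how many transmit descriptors the hardware has completed
+	 * on this queue since the last call.
+	 */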
+ pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
+
+ sq->num_to_release += nb_done;
+
+ if (likely(!force &&
+ sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
+ return;
+
+ nb_done = sq->num_to_release;
+ sq->num_to_release = 0;
+
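+	/* Walk the shadow queue: batch consecutive pool-backed buffers for
+	 * a single pp2_bpool_put_buffs() call and flush the batch whenever
+	 * an entry has to be skipped or the ring wraps around.
+	 */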
+ for (i = 0; i < nb_done; i++) {
+ entry = &sq->ent[sq->tail + num];
+ if (unlikely(!entry->buff.addr)) {
+ RTE_LOG(ERR, PMD,
+ "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
+ sq->tail, (u64)entry->buff.cookie,
+ (u64)entry->buff.addr);
+ skip_bufs = 1;
+ goto skip;
+ }
+
+ if (unlikely(!entry->bpool)) {
+ struct rte_mbuf *mbuf;
+
+ mbuf = (struct rte_mbuf *)
+ (cookie_addr_high | entry->buff.cookie);
+ rte_pktmbuf_free(mbuf);
+ skip_bufs = 1;
+ goto skip;
+ }
+
+ mrvl_port_bpool_size
+ [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
+ num++;
+ if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
+ goto skip;
+ continue;
+skip:
+ if (likely(num))
+ pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+ num += skip_bufs;
+ sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size -= num;
+ num = 0;
+ }
+
+ if (likely(num)) {
+ pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
+ sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size -= num;
+ }
+}
+
+/**
+ * DPDK callback for transmit.
+ *
+ * @param txq
+ *   Generic pointer to the transmit queue.
+ * @param tx_pkts
+ * Packets to transmit.
+ * @param nb_pkts
+ * Number of packets in array.
+ *
+ * @return
+ * Number of packets successfully transmitted.
+ */
+static uint16_t
+mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+ struct mrvl_txq *q = txq;
+ struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
+ struct pp2_hif *hif = hifs[rte_lcore_id()];
+ struct pp2_ppio_desc descs[nb_pkts];
+ int i;
+ uint16_t num, sq_free_size;
+
+ if (unlikely(!q->priv->ppio))
+ return 0;
+
+ if (sq->size)
+ mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);
+
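+	/* One slot is kept in reserve so the head never wraps onto the
+	 * tail.
+	 */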
+ sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
+ if (unlikely(nb_pkts > sq_free_size)) {
+ RTE_LOG(DEBUG, PMD,
+ "No room in shadow queue for %d packets! %d packets will be sent.\n",
+ nb_pkts, sq_free_size);
+ nb_pkts = sq_free_size;
+ }
+
+ for (i = 0; i < nb_pkts; i++) {
+ struct rte_mbuf *mbuf = tx_pkts[i];
+
+ if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
+ struct rte_mbuf *pref_pkt_hdr;
+
+ pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
+ rte_mbuf_prefetch_part1(pref_pkt_hdr);
+ rte_mbuf_prefetch_part2(pref_pkt_hdr);
+ }
+
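+		/* Record the mbuf in the shadow queue so that, once the
+		 * hardware reports it as transmitted, it can be returned
+		 * straight to its bpool. A NULL bpool marks mbufs that must
+		 * be freed with rte_pktmbuf_free() instead (foreign port or
+		 * refcnt > 1).
+		 */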
+ sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
+ sq->ent[sq->head].buff.addr =
+ rte_mbuf_data_dma_addr_default(mbuf);
+ sq->ent[sq->head].bpool =
+ (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
+ NULL : mrvl_port_to_bpool_lookup[mbuf->port];
+ sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
+ sq->size++;
+
+ pp2_ppio_outq_desc_reset(&descs[i]);
+ pp2_ppio_outq_desc_set_phys_addr(&descs[i],
+ rte_pktmbuf_mtophys(mbuf));
+ pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
+ pp2_ppio_outq_desc_set_pkt_len(&descs[i],
+ rte_pktmbuf_pkt_len(mbuf));
+ }
+
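+	/* pp2_ppio_send() updates nb_pkts in place to the number of
+	 * packets actually accepted by the hardware queue.
+	 */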
+ num = nb_pkts;
+ pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
+	/* Roll back shadow queue entries for packets that were not sent. */
+ if (unlikely(num > nb_pkts)) {
+ for (i = nb_pkts; i < num; i++) {
+ sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
+ MRVL_PP2_TX_SHADOWQ_MASK;
+ }
+ sq->size -= num - nb_pkts;
+ }
+
+ return nb_pkts;
+}
+