#
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
#include "vnic_dev.h"
#include "vnic_rq.h"
-static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
-{
- struct vnic_rq_buf *buf;
- unsigned int i, j, count = rq->ring.desc_count;
- unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
-
- for (i = 0; i < blks; i++) {
- rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
- if (!rq->bufs[i])
- return -ENOMEM;
- }
-
- for (i = 0; i < blks; i++) {
- buf = rq->bufs[i];
- for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
- buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
- buf->desc = (u8 *)rq->ring.descs +
- rq->ring.desc_size * buf->index;
- if (buf->index + 1 == count) {
- buf->next = rq->bufs[0];
- break;
- } else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
- buf->next = rq->bufs[i + 1];
- } else {
- buf->next = buf + 1;
- buf++;
- }
- }
- }
-
- rq->to_use = rq->to_clean = rq->bufs[0];
-
- return 0;
-}
-
-int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
- unsigned int desc_size)
-{
- int mem_size = 0;
-
- mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size);
-
- mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) *
- VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count);
-
- return mem_size;
-}
-
void vnic_rq_free(struct vnic_rq *rq)
{
struct vnic_dev *vdev;
- unsigned int i;
vdev = rq->vdev;
vnic_dev_free_desc_ring(vdev, &rq->ring);
- for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
- if (rq->bufs[i]) {
- kfree(rq->bufs[i]);
- rq->bufs[i] = NULL;
- }
- }
-
rq->ctrl = NULL;
}
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
unsigned int desc_count, unsigned int desc_size)
{
- int err;
+ int rc;
char res_name[NAME_MAX];
static int instance;
vnic_rq_disable(rq);
snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
- err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
+ rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
rq->socket_id, res_name);
- if (err)
- return err;
-
- err = vnic_rq_alloc_bufs(rq);
- if (err) {
- vnic_rq_free(rq);
- return err;
- }
-
- return 0;
+ return rc;
}
void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
iowrite32(fetch_index, &rq->ctrl->fetch_index);
iowrite32(posted_index, &rq->ctrl->posted_index);
- rq->to_use = rq->to_clean =
- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
}
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
fetch_index, fetch_index,
error_interrupt_enable,
error_interrupt_offset);
+ rq->rxst_idx = 0;
+ rq->tot_pkts = 0;
}
void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
}
void vnic_rq_clean(struct vnic_rq *rq,
- void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+ void (*buf_clean)(struct rte_mbuf **buf))
{
- struct vnic_rq_buf *buf;
- u32 fetch_index;
+ struct rte_mbuf **buf;
+ u32 fetch_index, i;
unsigned int count = rq->ring.desc_count;
- buf = rq->to_clean;
-
- while (vnic_rq_desc_used(rq) > 0) {
+ buf = &rq->mbuf_ring[0];
- (*buf_clean)(rq, buf);
-
- buf = rq->to_clean = buf->next;
- rq->ring.desc_avail++;
+ for (i = 0; i < count; i++) {
+ (*buf_clean)(buf);
+ buf++;
}
+ rq->ring.desc_avail = count - 1;
+ rq->rx_nb_hold = 0;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
/* Hardware surprise removal: reset fetch_index */
fetch_index = 0;
}
- rq->to_use = rq->to_clean =
- &rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
- [fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
+
iowrite32(fetch_index, &rq->ctrl->posted_index);
vnic_dev_clear_desc_ring(&rq->ring);
u32 pad10;
};
-/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
-#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
-#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
- ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
- VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
-#define VNIC_RQ_BUF_BLK_SZ(entries) \
- (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
-#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
- DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
-
-struct vnic_rq_buf {
- struct vnic_rq_buf *next;
- dma_addr_t dma_addr;
- void *os_buf;
- unsigned int os_buf_index;
- unsigned int len;
- unsigned int index;
- void *desc;
- uint64_t wr_id;
-};
-
struct vnic_rq {
unsigned int index;
+ unsigned int posted_index;
struct vnic_dev *vdev;
- struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
+ struct vnic_rq_ctrl __iomem *ctrl; /* memory-mapped */
struct vnic_dev_ring ring;
- struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
- struct vnic_rq_buf *to_use;
- struct vnic_rq_buf *to_clean;
+ struct rte_mbuf **mbuf_ring; /* array of allocated mbufs */
+ unsigned int mbuf_next_idx; /* next mb to consume */
void *os_buf_head;
unsigned int pkts_outstanding;
-
+ uint16_t rx_nb_hold;
+ uint16_t rx_free_thresh;
unsigned int socket_id;
struct rte_mempool *mp;
+ uint16_t rxst_idx;
+ uint32_t tot_pkts;
};
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
return rq->ring.desc_count - rq->ring.desc_avail - 1;
}
-static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
-{
- return rq->to_use->desc;
-}
-
-static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
-{
- return rq->to_use->index;
-}
-
-static inline void vnic_rq_post(struct vnic_rq *rq,
- void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len,
- uint64_t wrid)
-{
- struct vnic_rq_buf *buf = rq->to_use;
-
- buf->os_buf = os_buf;
- buf->os_buf_index = os_buf_index;
- buf->dma_addr = dma_addr;
- buf->len = len;
- buf->wr_id = wrid;
-
- buf = buf->next;
- rq->to_use = buf;
- rq->ring.desc_avail--;
-
- /* Move the posted_index every nth descriptor
- */
-
-#ifndef VNIC_RQ_RETURN_RATE
-#define VNIC_RQ_RETURN_RATE 0xf /* keep 2^n - 1 */
-#endif
-
- if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &rq->ctrl->posted_index);
- }
-}
-
-static inline void vnic_rq_post_commit(struct vnic_rq *rq,
- void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len)
-{
- struct vnic_rq_buf *buf = rq->to_use;
-
- buf->os_buf = os_buf;
- buf->os_buf_index = os_buf_index;
- buf->dma_addr = dma_addr;
- buf->len = len;
-
- buf = buf->next;
- rq->to_use = buf;
- rq->ring.desc_avail--;
-
- /* Move the posted_index every descriptor
- */
-
- /* Adding write memory barrier prevents compiler and/or CPU
- * reordering, thus avoiding descriptor posting before
- * descriptor is initialized. Otherwise, hardware can read
- * stale descriptor fields.
- */
- wmb();
- iowrite32(buf->index, &rq->ctrl->posted_index);
-}
-static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
-{
- rq->ring.desc_avail += count;
-}
enum desc_return_options {
VNIC_RQ_RETURN_DESC,
VNIC_RQ_DEFER_RETURN_DESC,
};
-static inline int vnic_rq_service(struct vnic_rq *rq,
- struct cq_desc *cq_desc, u16 completed_index,
- int desc_return, int (*buf_service)(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque), void *opaque)
-{
- struct vnic_rq_buf *buf;
- int skipped;
- int eop = 0;
-
- buf = rq->to_clean;
- while (1) {
-
- skipped = (buf->index != completed_index);
-
- if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
- eop++;
-
- if (desc_return == VNIC_RQ_RETURN_DESC)
- rq->ring.desc_avail++;
-
- rq->to_clean = buf->next;
-
- if (!skipped)
- break;
-
- buf = rq->to_clean;
- }
- return eop;
-}
-
static inline int vnic_rq_fill(struct vnic_rq *rq,
int (*buf_fill)(struct vnic_rq *rq))
{
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
- void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
-int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
- unsigned int desc_size);
-
+ void (*buf_clean)(struct rte_mbuf **buf));
#endif /* _VNIC_RQ_H_ */
#include "vnic_nic.h"
#include "vnic_rss.h"
#include "enic_res.h"
+#include "cq_enet_desc.h"
#define DRV_NAME "enic_pmd"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver"
return (struct enic *)eth_dev->data->dev_private;
}
+#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
+#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
+#define ASSERT(x) do { \
+ if (!(x)) \
+ rte_panic("ENIC: x"); \
+} while (0)
+#else
+#define ASSERT(x)
+#endif
+
extern void enic_fdir_stats_get(struct enic *enic,
struct rte_eth_fdir_stats *stats);
extern int enic_fdir_add_fltr(struct enic *enic,
uint16_t ol_flags, uint16_t vlan_tag);
extern void enic_post_wq_index(struct vnic_wq *wq);
-extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
- unsigned int budget, unsigned int *work_done);
extern int enic_probe(struct enic *enic);
extern int enic_clsf_init(struct enic *enic);
extern void enic_clsf_destroy(struct enic *enic);
+uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+
#endif /* _ENIC_H_ */
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_rxconf *rx_conf,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
int ret;
return ret;
}
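+ /* Completed descriptors are returned to the NIC in batches governed by rx_free_thresh. */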
+ enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
+ dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+ enic->rq[queue_idx].rx_free_thresh);
+
return enicpmd_dev_setup_intr(enic);
}
DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM;
+ device_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
+ };
}
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
return index;
}
-static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
-{
- struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
- unsigned int work_done;
-
- if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
- dev_err(enic, "error in enicpmd poll\n");
-
- return work_done;
-}
-
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
enic->port_id = eth_dev->data->port_id;
enic->rte_dev = eth_dev;
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
- eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
+ eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
pdev = eth_dev->pci_dev;
* Register as the [Poll Mode] Driver of Cisco ENIC device.
*/
static int
-rte_enic_pmd_init(const char *name __rte_unused,
- const char *params __rte_unused)
+rte_enic_pmd_init(__rte_unused const char *name,
+ __rte_unused const char *params)
{
ENICPMD_FUNC_TRACE();
#include "vnic_nic.h"
#include "enic_vnic_wq.h"
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return m;
+}
+
+
static inline int enic_is_sriov_vf(struct enic *enic)
{
return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
return !is_mcast_addr(addr) && !is_zero_addr(addr);
}
-static inline struct rte_mbuf *
-enic_rxmbuf_alloc(struct rte_mempool *mp)
+static void
+enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
{
- struct rte_mbuf *m;
+ uint16_t i;
- m = __rte_mbuf_raw_alloc(mp);
- __rte_mbuf_sanity_check_raw(m, 0);
- return m;
+ if (!rq || !rq->mbuf_ring) {
+ dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < enic->config.rq_desc_count; i++) {
+ if (rq->mbuf_ring[i]) {
+ rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
+ rq->mbuf_ring[i] = NULL;
+ }
+ }
}
+
void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
{
vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
}
static void
-enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf)
+enic_free_rq_buf(struct rte_mbuf **mbuf)
{
- if (!buf->os_buf)
+ if (*mbuf == NULL)
return;
- rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
- buf->os_buf = NULL;
+ rte_pktmbuf_free(*mbuf);
+ *mbuf = NULL;
}
void enic_init_vnic_resources(struct enic *enic)
}
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
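+/* Fill the RQ's mbuf ring, encode a receive descriptor per mbuf and post the initial batch to the NIC. */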
+static int
+enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
{
- struct enic *enic = vnic_dev_priv(rq->vdev);
+ struct rte_mbuf *mb;
+ struct rq_enet_desc *rqd = rq->ring.descs;
+ unsigned i;
dma_addr_t dma_addr;
- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
- uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
- u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
- struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
- struct rte_mbuf *hdr_mbuf = NULL;
-
- if (!mbuf) {
- dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
- return -1;
- }
-
- if (unlikely(split_hdr_size)) {
- if (vnic_rq_desc_avail(rq) < 2) {
- rte_mempool_put(mbuf->pool, mbuf);
- return -1;
- }
- hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
- if (!hdr_mbuf) {
- rte_mempool_put(mbuf->pool, mbuf);
- dev_err(enic,
- "hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
- return -1;
- }
-
- hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-
- hdr_mbuf->nb_segs = 2;
- hdr_mbuf->port = enic->port_id;
- hdr_mbuf->next = mbuf;
-
- dma_addr = (dma_addr_t)
- (hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);
-
- rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);
- vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
- (unsigned int)split_hdr_size, 0 /*wrid*/);
+ dev_debug(enic, "queue %u, allocating %u rx queue mbufs", rq->index,
+ rq->ring.desc_count);
- desc = vnic_rq_next_desc(rq);
- type = RQ_ENET_TYPE_NOT_SOP;
- } else {
- mbuf->nb_segs = 1;
- mbuf->port = enic->port_id;
- }
-
- mbuf->data_off = RTE_PKTMBUF_HEADROOM;
- mbuf->next = NULL;
-
- dma_addr = (dma_addr_t)
- (mbuf->buf_physaddr + mbuf->data_off);
-
- rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);
-
- vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
- (unsigned int)mbuf->buf_len, 0 /*wrid*/);
-
- return 0;
-}
-
-static int enic_rq_indicate_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
- struct rte_mbuf *rx_pkt = NULL;
- struct rte_mbuf *hdr_rx_pkt = NULL;
-
- u8 type, color, eop, sop, ingress_port, vlan_stripped;
- u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
- u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
- u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
- u8 packet_error;
- u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
- u32 rss_hash;
-
- cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
- &type, &color, &q_number, &completed_index,
- &ingress_port, &fcoe, &eop, &sop, &rss_type,
- &csum_not_calc, &rss_hash, &bytes_written,
- &packet_error, &vlan_stripped, &vlan_tci, &checksum,
- &fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
- &fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
- &ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
- &fcs_ok);
-
- rx_pkt = (struct rte_mbuf *)buf->os_buf;
- buf->os_buf = NULL;
-
- if (unlikely(packet_error)) {
- dev_err(enic, "packet error\n");
- rx_pkt->data_len = 0;
- return 0;
- }
-
- if (unlikely(skipped)) {
- rx_pkt->data_len = 0;
- return 0;
- }
-
- if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
- /* No header split configured */
- *rx_pkt_bucket = rx_pkt;
- rx_pkt->pkt_len = bytes_written;
-
- if (ipv4) {
- rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (!csum_not_calc) {
- if (unlikely(!ipv4_csum_ok))
- rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
- if ((tcp || udp) && (!tcp_udp_csum_ok))
- rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- }
- } else if (ipv6)
- rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
- } else {
- /* Header split */
- if (sop && !eop) {
- /* This piece is header */
- *rx_pkt_bucket = rx_pkt;
- rx_pkt->pkt_len = bytes_written;
- } else {
- if (sop && eop) {
- /* The packet is smaller than split_hdr_size */
- *rx_pkt_bucket = rx_pkt;
- rx_pkt->pkt_len = bytes_written;
- if (ipv4) {
- rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
- if (!csum_not_calc) {
- if (unlikely(!ipv4_csum_ok))
- rx_pkt->ol_flags |=
- PKT_RX_IP_CKSUM_BAD;
-
- if ((tcp || udp) &&
- (!tcp_udp_csum_ok))
- rx_pkt->ol_flags |=
- PKT_RX_L4_CKSUM_BAD;
- }
- } else if (ipv6)
- rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
- } else {
- /* Payload */
- hdr_rx_pkt = *rx_pkt_bucket;
- hdr_rx_pkt->pkt_len += bytes_written;
- if (ipv4) {
- hdr_rx_pkt->packet_type =
- RTE_PTYPE_L3_IPV4;
- if (!csum_not_calc) {
- if (unlikely(!ipv4_csum_ok))
- hdr_rx_pkt->ol_flags |=
- PKT_RX_IP_CKSUM_BAD;
-
- if ((tcp || udp) &&
- (!tcp_udp_csum_ok))
- hdr_rx_pkt->ol_flags |=
- PKT_RX_L4_CKSUM_BAD;
- }
- } else if (ipv6)
- hdr_rx_pkt->packet_type =
- RTE_PTYPE_L3_IPV6;
- }
+ for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
+ mb = rte_rxmbuf_alloc(rq->mp);
+ if (mb == NULL) {
+ dev_err(enic, "RX mbuf alloc failed queue_id=%u",
+ (unsigned)rq->index);
+ return -ENOMEM;
}
- }
- rx_pkt->data_len = bytes_written;
+ dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
- if (rss_hash) {
- rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
- rx_pkt->hash.rss = rss_hash;
+ rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
+ mb->buf_len);
+ rq->mbuf_ring[i] = mb;
}
- if (vlan_tci) {
- rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
- rx_pkt->vlan_tci = vlan_tci;
- }
+ /* make sure all prior writes are complete before doing the PIO write */
+ rte_wmb();
- return eop;
-}
+ /* Post all but the last 2 cache lines' worth of descriptors */
+ rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
+ / sizeof(struct rq_enet_desc));
+ rq->rx_nb_hold = 0;
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- return vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_indicate_buf, opaque);
-
-}
+ dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
+ enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+ rte_rmb();
-int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
- unsigned int budget, unsigned int *work_done)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- unsigned int cq = enic_cq_rq(enic, rq->index);
- int err = 0;
-
- *work_done = vnic_cq_service(&enic->cq[cq],
- budget, enic_rq_service, (void *)rx_pkts);
-
- if (*work_done) {
- vnic_rq_fill(rq, enic_rq_alloc_buf);
+ return 0;
- /* Need at least one buffer on ring to get going */
- if (vnic_rq_desc_used(rq) == 0) {
- dev_err(enic, "Unable to alloc receive buffers\n");
- err = -1;
- }
- }
- return err;
}
static void *
int enic_enable(struct enic *enic)
{
unsigned int index;
+ int err;
struct rte_eth_dev *eth_dev = enic->rte_dev;
eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
dev_warning(enic, "Init of hash table for clsf failed."\
"Flow director feature will not work\n");
- /* Fill RQ bufs */
for (index = 0; index < enic->rq_count; index++) {
- vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);
-
- /* Need at least one buffer on ring to get going
- */
- if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
- dev_err(enic, "Unable to alloc receive buffers\n");
- return -1;
+ err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+ if (err) {
+ dev_err(enic, "Failed to alloc RX queue mbufs\n");
+ return err;
}
}
struct vnic_rq *rq = (struct vnic_rq *)rxq;
struct enic *enic = vnic_dev_priv(rq->vdev);
+ enic_rxmbuf_queue_release(enic, rq);
+ rte_free(rq->mbuf_ring);
+ rq->mbuf_ring = NULL;
vnic_rq_free(rq);
vnic_cq_free(&enic->cq[rq->index]);
}
unsigned int socket_id, struct rte_mempool *mp,
uint16_t nb_desc)
{
- int err;
+ int rc;
struct vnic_rq *rq = &enic->rq[queue_idx];
rq->socket_id = socket_id;
}
/* Allocate queue resources */
- err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
- enic->config.rq_desc_count,
- sizeof(struct rq_enet_desc));
- if (err) {
+ rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
+ enic->config.rq_desc_count, sizeof(struct rq_enet_desc));
+ if (rc) {
dev_err(enic, "error in allocation of rq\n");
- return err;
+ goto err_exit;
}
- err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+ rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
socket_id, enic->config.rq_desc_count,
sizeof(struct cq_enet_rq_desc));
- if (err) {
- vnic_rq_free(rq);
+ if (rc) {
dev_err(enic, "error in allocation of cq for rq\n");
+ goto err_free_rq_exit;
}
- return err;
+ /* Allocate the mbuf ring */
+ rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
+ sizeof(struct rte_mbuf *) * enic->config.rq_desc_count,
+ RTE_CACHE_LINE_SIZE, rq->socket_id);
+
+ if (rq->mbuf_ring != NULL)
+ return 0;
+
+ /* cleanup on error */
+ vnic_cq_free(&enic->cq[queue_idx]);
+err_free_rq_exit:
+ vnic_rq_free(rq);
+err_exit:
+ return -ENOMEM;
}
void enic_free_wq(void *txq)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+
for (i = 0; i < enic->rq_count; i++)
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
for (i = 0; i < enic->cq_count; i++)
/* Set ingress vlan rewrite mode before vnic initialization */
err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
- IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+ IG_VLAN_REWRITE_MODE_PASS_THRU);
if (err) {
dev_err(enic,
"Failed to set ingress vlan rewrite mode, aborting.\n");
#define ENIC_UNICAST_PERFECT_FILTERS 32
#define ENIC_NON_TSO_MAX_DESC 16
+#define ENIC_DEFAULT_RX_FREE_THRESH 32
#define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
WQ_ENET_OFFLOAD_MODE_TSO,
eop, 1 /* SOP */, eop, loopback);
}
-static inline void enic_queue_rq_desc(struct vnic_rq *rq,
- void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len)
-{
- struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
- u64 wrid = 0;
- u8 type = os_buf_index ?
- RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
-
- rq_enet_desc_enc(desc,
- (u64)dma_addr | VNIC_PADDR_TARGET,
- type, (u16)len);
-
- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
-}
struct enic;
--- /dev/null
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+
+#include "enic_compat.h"
+#include "rq_enet_desc.h"
+#include "enic.h"
+
+#define RTE_PMD_USE_PREFETCH
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_enic_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_enic_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+ struct rte_mbuf *m;
+
+ m = __rte_mbuf_raw_alloc(mp);
+ __rte_mbuf_sanity_check_raw(m, 0);
+ return m;
+}
+
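+/* Helpers to extract fields from the enet RQ completion descriptor. */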
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+ return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+ return (le16_to_cpu(crd->bytes_written_flags) &
+ ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+ return ((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+ CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+ return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+ == CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+ return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+ CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+ return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+ CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+ return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd)
+{
+ return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ==
+ CQ_ENET_RQ_DESC_FLAGS_FCS_OK);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+ return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ return le16_to_cpu(cqrd->bytes_written_flags) &
+ CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
+
+static inline uint64_t
+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t bwflags;
+ uint64_t pkt_err_flags = 0;
+
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+
+ /* Check for packet error. Can't be more specific than MAC error */
+ if (enic_cq_rx_desc_packet_error(bwflags)) {
+ pkt_err_flags |= PKT_RX_MAC_ERR;
+ }
+
+ /* Check for bad FCS. MAC error isn't quite, but no other choice */
+ if (!enic_cq_rx_desc_fcs_ok(cqrd)) {
+ pkt_err_flags |= PKT_RX_MAC_ERR;
+ }
+ return pkt_err_flags;
+}
+
+/*
+ * Lookup table to translate RX CQ descriptor flags to mbuf packet types.
+ */
+static inline uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint8_t cqrd_flags = cqrd->flags;
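+ /*
+  * Table index is the flags byte masked to the IPv4/IPv6/TCP/UDP/fragment
+  * bits (see the mask applied below); unlisted combinations map to 0.
+  */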
+ static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+ [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+ [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+ | RTE_PTYPE_L4_UDP,
+ [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+ | RTE_PTYPE_L4_TCP,
+ [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+ | RTE_PTYPE_L4_FRAG,
+ [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+ [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
+ | RTE_PTYPE_L4_UDP,
+ [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
+ | RTE_PTYPE_L4_TCP,
+ [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
+ | RTE_PTYPE_L4_FRAG,
+ /* All others reserved */
+ };
+ cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+ | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+ | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+ return cq_type_table[cqrd_flags];
+}
+
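+/* Convert completion descriptor flags (VLAN, RSS, checksum) into mbuf ol_flags. */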
+static inline void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+ struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+ uint16_t ciflags, bwflags, pkt_flags = 0;
+ ciflags = enic_cq_rx_desc_ciflags(cqrd);
+ bwflags = enic_cq_rx_desc_bwflags(cqrd);
+
+ ASSERT(mbuf->ol_flags == 0);
+
+ /* flags are meaningless if !EOP */
+ if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
+ goto mbuf_flags_done;
+
+ /* VLAN stripping */
+ if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+ pkt_flags |= PKT_RX_VLAN_PKT;
+ mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+ } else {
+ mbuf->vlan_tci = 0;
+ }
+
+ /* RSS flag */
+ if (enic_cq_rx_desc_rss_type(cqrd)) {
+ pkt_flags |= PKT_RX_RSS_HASH;
+ mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+ }
+
+ /* checksum flags */
+ if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
+ (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
+ if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
+ pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+ if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+ if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
+ pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+
+ mbuf_flags_done:
+ mbuf->ol_flags = pkt_flags;
+}
+
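+/* Add two ring indices, wrapping around the descriptor count. */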
+static inline uint32_t
+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+ uint32_t d = i0 + i1;
+ ASSERT(i0 < n_descriptors);
+ ASSERT(i1 < n_descriptors);
+ d -= (d >= n_descriptors) ? n_descriptors : 0;
+ return d;
+}
+
+
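+/*
+ * Burst receive: drain completed descriptors from the CQ, hand the filled
+ * mbufs to the caller and refill each ring slot with a newly allocated mbuf.
+ */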
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ struct vnic_rq *rq = rx_queue;
+ struct enic *enic = vnic_dev_priv(rq->vdev);
+ unsigned int rx_id;
+ struct rte_mbuf *nmb, *rxmb;
+ uint16_t nb_rx = 0;
+ uint16_t nb_hold;
+ struct vnic_cq *cq;
+ volatile struct cq_desc *cqd_ptr;
+ uint8_t color;
+
+ cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+
+ nb_hold = rq->rx_nb_hold; /* mbufs held by software */
+
+ while (nb_rx < nb_pkts) {
+ uint16_t rx_pkt_len;
+ volatile struct rq_enet_desc *rqd_ptr;
+ dma_addr_t dma_addr;
+ struct cq_desc cqd;
+ uint64_t ol_err_flags;
+
+ /* Check for pkts available */
+ color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
+ & CQ_DESC_COLOR_MASK;
+ if (color == cq->last_color)
+ break;
+
+ /* Get the cq descriptor and rq pointer */
+ cqd = *cqd_ptr;
+ rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
+
+ /* allocate a new mbuf */
+ nmb = rte_rxmbuf_alloc(rq->mp);
+ if (nmb == NULL) {
+ dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
+ enic->port_id, (unsigned)rq->index);
+ rte_eth_devices[enic->port_id].
+ data->rx_mbuf_alloc_failed++;
+ break;
+ }
+
+ /* Check for FCS or packet errors */
+ ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd);
+ if (ol_err_flags == 0)
+ rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
+ else
+ rx_pkt_len = 0;
+
+ /* Get the mbuf to return and replace with one just allocated */
+ rxmb = rq->mbuf_ring[rx_id];
+ rq->mbuf_ring[rx_id] = nmb;
+
+ /* Increment cqd, rqd, mbuf_table index */
+ rx_id++;
+ if (unlikely(rx_id == rq->ring.desc_count)) {
+ rx_id = 0;
+ cq->last_color = cq->last_color ? 0 : 1;
+ }
+
+ /* Prefetch next mbuf & desc while processing current one */
+ cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+ rte_enic_prefetch(cqd_ptr);
+ rte_enic_prefetch(rq->mbuf_ring[rx_id]);
+ rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
+ + rx_id);
+
+ /* Push descriptor for newly allocated mbuf */
+ dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
+ rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
+
+ /* Fill in the rest of the mbuf */
+ rxmb->data_off = RTE_PKTMBUF_HEADROOM;
+ rxmb->nb_segs = 1;
+ rxmb->next = NULL;
+ rxmb->pkt_len = rx_pkt_len;
+ rxmb->data_len = rx_pkt_len;
+ rxmb->port = enic->port_id;
+ rxmb->ol_flags = ol_err_flags;
+ if (!ol_err_flags)
+ enic_cq_rx_to_pkt_flags(&cqd, rxmb);
+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+
+ /* prefetch mbuf data for caller */
+ rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
+ RTE_PKTMBUF_HEADROOM));
+
+ /* store the mbuf address into the next entry of the array */
+ rx_pkts[nb_rx++] = rxmb;
+ }
+
+ nb_hold += nb_rx;
+ cq->to_clean = rx_id;
+
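+ /* Post descriptors back to the NIC only once more than rx_free_thresh are held, batching the posted_index writes. */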
+ if (nb_hold > rq->rx_free_thresh) {
+ rq->posted_index = enic_ring_add(rq->ring.desc_count,
+ rq->posted_index, nb_hold);
+ nb_hold = 0;
+ rte_mb();
+ iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+ }
+
+ rq->rx_nb_hold = nb_hold;
+
+ return nb_rx;
+}