struct rte_mbuf *pkt_last_seg; /* Last segment of current packet. */
uint64_t hw_ring_phys_addr;
uint64_t mbuf_initializer; /* value to init mbufs */
- /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
+ /* need to alloc dummy mbuf, for wraparound when scanning hw ring */
struct rte_mbuf fake_mbuf;
uint16_t next_dd;
uint16_t next_alloc;
uint16_t alloc_thresh;
volatile uint32_t *tail_ptr;
uint16_t nb_desc;
+ /* Number of fake descriptors added at the tail for the vector RX function */
+ uint16_t nb_fake_desc;
uint16_t queue_id;
/* The two fields below are only valid when the vector PMD (vPMD) is in use. */
uint16_t rxrearm_nb; /* number of descriptors remaining to be re-armed */
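/*
 * Illustrative sketch, not part of the patch: the vector RX path scans
 * hw_ring in fixed-size groups (FM10K_MULT_RX_DESC descriptors at a
 * time), so a group that starts near the ring end can read past
 * hw_ring[nb_desc - 1]. The nb_fake_desc tail entries keep that read
 * inside the array, and because they stay zeroed their DD status bit
 * never tests as set. The helper below is hypothetical; the descriptor
 * fields (d.staterr, FM10K_RXD_STATUS_DD) follow the driver's scalar
 * RX path.
 */
static inline uint16_t
sketch_count_done_descs(const struct fm10k_rx_queue *q, uint16_t start)
{
	uint16_t i, nb = 0;

	/* May index up to FM10K_MULT_RX_DESC - 1 slots past nb_desc;
	 * the zeroed fake descriptors guarantee the loop stops there.
	 */
	for (i = 0; i < FM10K_MULT_RX_DESC; i++) {
		if (!(q->hw_ring[start + i].d.staterr & FM10K_RXD_STATUS_DD))
			break;
		nb++;
	}
	return nb;
}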
static inline int
rx_queue_reset(struct fm10k_rx_queue *q)
{
+ static const union fm10k_rx_desc zero = {{0}};
uint64_t dma_addr;
int i, diag;
PMD_INIT_FUNC_TRACE();
q->hw_ring[i].q.hdr_addr = dma_addr;
}
+ /* Initialize the extra software ring entries. Space for these
+ * extra entries is always allocated, whether or not the vector RX
+ * path ends up being used.
+ */
+ memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
+ for (i = 0; i < q->nb_fake_desc; ++i) {
+ q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
+ q->hw_ring[q->nb_desc + i] = zero;
+ }
+
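/*
 * Illustrative note, not part of the patch: from here on, every sw_ring
 * slot in [nb_desc, nb_desc + nb_fake_desc) aliases the queue-local
 * fake_mbuf, so a vector-width load that runs past the real ring still
 * reads a valid mbuf pointer. Because the matching hw_ring tail entries
 * stay zeroed (DD clear), the fake mbuf presumably never completes and
 * is never handed to the application.
 */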
q->next_dd = 0;
q->next_alloc = 0;
q->next_trigger = q->alloc_thresh - 1;
for (i = 0; i < q->nb_desc; ++i)
q->hw_ring[i] = zero;
+ /* zero the fake descriptors at the ring tail */
+ for (i = 0; i < q->nb_fake_desc; ++i)
+ q->hw_ring[q->nb_desc + i] = zero;
+
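/*
 * Illustrative note, not part of the patch: re-zeroing the fake
 * descriptors on queue clean restores the DD-clear tail state that
 * rx_queue_reset establishes, so a later restart of the vector RX path
 * again stops its descriptor scan at the real ring end.
 */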
/* vPMD driver has a different way of releasing mbufs. */
if (q->rx_using_sse) {
fm10k_rx_queue_release_mbufs_vec(q);
/* setup queue */
q->mp = mp;
q->nb_desc = nb_desc;
+ q->nb_fake_desc = FM10K_MULT_RX_DESC;
q->port_id = dev->data->port_id;
q->queue_id = queue_id;
q->tail_ptr = (volatile uint32_t *)
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
- nb_desc * sizeof(struct rte_mbuf *),
- RTE_CACHE_LINE_SIZE, socket_id);
+ (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
+ RTE_CACHE_LINE_SIZE, socket_id);
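/*
 * Illustrative sizing note, values assumed for the example: with
 * nb_desc = 512, FM10K_MULT_RX_DESC = 4 and 8-byte pointers, this
 * allocates (512 + 4) * 8 = 4128 bytes, i.e. 516 slots, so a 4-wide
 * scan starting at index 511 stays inside the array.
 */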
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);