         ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
         if (unlikely(ol_flags)) {
                 /* L4 cksum */
-                if (ol_flags & PKT_TX_TCP_CKSUM)
+                uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;
+                if (l4_flags == PKT_TX_TCP_CKSUM)
                         sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
-                else if (ol_flags & PKT_TX_UDP_CKSUM)
+                else if (l4_flags == PKT_TX_UDP_CKSUM)
                         sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
                 else
                         sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;
+
+                sqe.hdr.l3_offset = pkt->l2_len;
                 sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;
                 /* L3 cksum */
-                if (ol_flags & PKT_TX_IP_CKSUM) {
+                if (ol_flags & PKT_TX_IP_CKSUM)
                         sqe.hdr.csum_l3 = 1;
-                        sqe.hdr.l3_offset = pkt->l2_len;
-                }
         }
         entry->buff[0] = sqe.buff[0];
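/*
 * Illustrative sketch, not part of the patch: PKT_TX_UDP_CKSUM is the value
 * (3ULL << 52) inside the two-bit PKT_TX_L4_MASK field rather than a
 * standalone bit, so an "ol_flags & PKT_TX_UDP_CKSUM" test also fires for
 * TCP or SCTP requests; hence the exact compare against the masked value in
 * the hunk above.  An application asking for these offloads on a
 * hypothetical IPv4/TCP mbuf "pkt" before rte_eth_tx_burst() would set the
 * fields the header fill reads roughly as follows.
 */
pkt->l2_len = sizeof(struct ether_hdr);   /* L3 header follows Ethernet */
pkt->l3_len = sizeof(struct ipv4_hdr);    /* L4 header follows IPv4 */
pkt->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;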
         void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;
         if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
-                rxq->nic->eth_dev->data->rx_mbuf_alloc_failed += to_fill;
+                rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+                        to_fill;
                 return 0;
         }
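/*
 * Illustrative sketch, not part of the patch: rte_eth_devices[] is the
 * generic ethdev array indexed by port id, so the counter bumped above is
 * the per-port rx_mbuf_alloc_failed that the ethdev layer reports to
 * applications as rte_eth_stats.rx_nombuf.  "port_id" is assumed to be a
 * valid, started port.
 */
struct rte_eth_stats stats;

rte_eth_stats_get(port_id, &stats);
printf("Rx mbuf allocation failures: %" PRIu64 "\n", stats.rx_nombuf);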
         rxq = dev->data->rx_queues[queue_idx];
         return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
 }
+
+uint32_t
+nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+        struct nicvf_rxq *rxq;
+        uint32_t to_process;
+        uint32_t rx_free;
+
+        rxq = dev->data->rx_queues[queue_idx];
+        to_process = rxq->recv_buffers;
+        while (rxq->recv_buffers > 0) {
+                rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
+                rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
+        }
+
+        assert(rxq->recv_buffers == 0);
+        return to_process;
+}
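/*
 * Minimal usage sketch (hypothetical call site, not part of the patch):
 * a queue start or restart path that knows rxq->recv_buffers RBDR slots
 * are empty can hand that many fresh mbufs back to the ring buffer
 * descriptor ring in one call; the helper above works through them in
 * chunks of at most NICVF_MAX_RX_FREE_THRESH per nicvf_fill_rbdr() pass.
 * "dev" and "queue_idx" are assumed to identify a configured Rx queue.
 */
uint32_t posted;

posted = nicvf_dev_rbdr_refill(dev, queue_idx);
/* posted equals the rxq->recv_buffers count observed on entry */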