[Features]
Link status = Y
Link status event = Y
+Fast mbuf free = Y
Rx interrupt = Y
Queue start/stop = Y
MTU update = Y
unsigned int socket_id;
const struct rte_memzone *cqmsg_rz;
uint16_t last_completed_index;
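+	/* Tx offloads enabled on this queue (DEV_TX_OFFLOAD_* flags) */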
+ uint64_t offloads;
};
static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
uint64_t rx_offload_capa; /* DEV_RX_OFFLOAD flags */
uint64_t tx_offload_capa; /* DEV_TX_OFFLOAD flags */
+ uint64_t tx_queue_offload_capa; /* DEV_TX_OFFLOAD flags */
uint64_t tx_offload_mask; /* PKT_TX flags accepted */
};
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ struct vnic_wq *wq;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
RTE_ASSERT(queue_idx < enic->conf_wq_count);
- eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+ wq = &enic->wq[queue_idx];
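+	/* Combine per-queue and port-level (txmode) Tx offloads for this WQ */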
+ wq->offloads = tx_conf->offloads |
+ eth_dev->data->dev_conf.txmode.offloads;
+ eth_dev->data->tx_queues[queue_idx] = (void *)wq;
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
if (ret) {
device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
device_info->rx_offload_capa = enic->rx_offload_capa;
device_info->tx_offload_capa = enic->tx_offload_capa;
+ device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
ENICPMD_FUNC_TRACE();
qinfo->nb_desc = wq->ring.desc_count;
memset(&qinfo->conf, 0, sizeof(qinfo->conf));
- qinfo->conf.offloads = enic->tx_offload_capa;
+ qinfo->conf.offloads = wq->offloads;
/* tx_thresh, and all the other fields are not applicable for enic */
}
* Default hardware capabilities. enic_dev_init() may add additional
* flags if it enables overlay offloads.
*/
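+	/* Per-queue Tx capabilities; reported as a subset of the port capabilities */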
+ enic->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
enic->tx_offload_capa =
+ enic->tx_queue_offload_capa |
DEV_TX_OFFLOAD_MULTI_SEGS |
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
return nb_rx;
}
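+/*
+ * DEV_TX_OFFLOAD_MBUF_FAST_FREE contract: the application guarantees that,
+ * per queue, all transmitted mbufs come from the same mempool and have a
+ * reference count of 1. Completed buffers can therefore be returned to the
+ * pool in bulk with rte_mempool_put_bulk() instead of rte_pktmbuf_free_seg().
+ */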
+static void enic_fast_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+{
+ unsigned int desc_count, n, nb_to_free, tail_idx;
+ struct rte_mempool *pool;
+ struct rte_mbuf **m;
+
+ desc_count = wq->ring.desc_count;
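+	/* Descriptors completed, from tail_idx up to completed_index inclusive */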
+ nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index)
+ + 1;
+ tail_idx = wq->tail_idx;
+ wq->tail_idx += nb_to_free;
+ wq->ring.desc_avail += nb_to_free;
+ if (wq->tail_idx >= desc_count)
+ wq->tail_idx -= desc_count;
+ /* First, free at most until the end of ring */
+ m = &wq->bufs[tail_idx];
+ pool = (*m)->pool;
+ n = RTE_MIN(nb_to_free, desc_count - tail_idx);
+ rte_mempool_put_bulk(pool, (void **)m, n);
+ n = nb_to_free - n;
+ /* Then wrap and free the rest */
+ if (unlikely(n))
+ rte_mempool_put_bulk(pool, (void **)wq->bufs, n);
+}
+
static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
{
struct rte_mbuf *buf;
completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
if (wq->last_completed_index != completed_index) {
- enic_free_wq_bufs(wq, completed_index);
+ if (wq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ enic_fast_free_wq_bufs(wq, completed_index);
+ else
+ enic_free_wq_bufs(wq, completed_index);
wq->last_completed_index = completed_index;
}
return 0;