rte_memzone_free(txq->mz);
if (txq->sw_ring)
rte_free(txq->sw_ring);
+ if (txq->free)
+ rte_free(txq->free);
rte_free(txq);
}
}
/* Don't need alloc sw_ring, because upper applications don't use it */
txq->sw_ring = NULL;
+ txq->free = NULL;
txq->hns = hns;
txq->tx_deferred_start = false;
txq->tx_bd_ready = txq->nb_tx_desc - 1;
txq->tx_free_thresh = tx_free_thresh;
txq->tx_rs_thresh = tx_rs_thresh;
+ txq->free = rte_zmalloc_socket("hns3 TX mbuf free array",
+ sizeof(struct rte_mbuf *) * txq->tx_rs_thresh,
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!txq->free) {
+ hns3_err(hw, "failed to allocate tx mbuf free array!");
+ hns3_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+
txq->port_id = dev->data->port_id;
txq->pvid_state = hw->port_base_vlan_cfg.state;
txq->configured = true;
return nb_tx;
}
+/*
+ * Weak stub for the vector-Tx capability probe. Always reports -ENOTSUP so
+ * the scalar Tx path is selected when no vector implementation is linked in.
+ * NOTE(review): presumably overridden by a NEON build variant -- the burst
+ * mode string for hns3_xmit_pkts_vec is "Vector Neon"; confirm against the
+ * arm64 source file.
+ */
+int __rte_weak
+hns3_tx_check_vec_support(__rte_unused struct rte_eth_dev *dev)
+{
+	return -ENOTSUP;
+}
+
+/*
+ * Weak stub for the vector Tx burst function. Transmits nothing (returns 0
+ * packets sent). Never reached in practice: hns3_tx_check_vec_support()
+ * returns -ENOTSUP in the same stub build, so the selector does not pick
+ * this function unless the strong (vector) definitions replace both.
+ */
+uint16_t __rte_weak
+hns3_xmit_pkts_vec(__rte_unused void *tx_queue,
+		   __rte_unused struct rte_mbuf **tx_pkts,
+		   __rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
int
hns3_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
struct rte_eth_burst_mode *mode)
info = "Scalar Simple";
else if (pkt_burst == hns3_xmit_pkts)
info = "Scalar";
+ else if (pkt_burst == hns3_xmit_pkts_vec)
+ info = "Vector Neon";
if (info == NULL)
return -EINVAL;
uint64_t offloads = dev->data->dev_conf.txmode.offloads;
struct hns3_adapter *hns = dev->data->dev_private;
+ if (hns->tx_vec_allowed && hns3_tx_check_vec_support(dev) == 0) {
+ *prep = NULL;
+ return hns3_xmit_pkts_vec;
+ }
+
if (hns->tx_simple_allowed &&
offloads == (offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) {
*prep = NULL;