#include <rte_udp.h>
#include <rte_ip.h>
#include <rte_net.h>
+#include <rte_vect.h>
#include "i40e_logs.h"
#include "base/i40e_prototype.h"
if (nb_hold > rxq->rx_free_thresh) {
rx_id = (uint16_t) ((rx_id == 0) ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
if (nb_hold > rxq->rx_free_thresh) {
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
- I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ I40E_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
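For reference, a minimal sketch of what the write-combining doorbell
macros are assumed to expand to, built on the generic <rte_io.h>
helpers (the real definitions live in the driver's osdep header):

#include <rte_byteorder.h>
#include <rte_io.h>

/* Tail/doorbell write that may be coalesced with neighbouring stores
 * on platforms that map the BAR write-combining; elsewhere it behaves
 * like a plain MMIO write.
 */
#define I40E_PCI_REG_WC_WRITE(reg, value) \
	rte_write32_wc(rte_cpu_to_le_32(value), reg)

/* Same, but without the implicit I/O store barrier; the caller must
 * order preceding descriptor writes itself (see the Tx path below).
 */
#define I40E_PCI_REG_WC_WRITE_RELAXED(reg, value) \
	rte_write32_wc_relaxed(rte_cpu_to_le_32(value), reg)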
(unsigned) txq->port_id, (unsigned) txq->queue_id,
(unsigned) tx_id, (unsigned) nb_tx);
- rte_cio_wmb();
- I40E_PCI_REG_WRITE_RELAXED(txq->qtx_tail, tx_id);
+ rte_io_wmb();
+ I40E_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);
txq->tx_tail = tx_id;
return nb_tx;
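The relaxed variant pairs with an explicit barrier: rte_cio_wmb() was
consolidated into rte_io_wmb(), and the ordering contract of the two
patched lines above is, in sketch form:

/* 1. descriptor stores to host memory have already happened;       */
/* 2. fence them before the doorbell so the NIC never fetches stale */
/*    descriptors:                                                  */
rte_io_wmb();
/* 3. "relaxed" means the macro adds no barrier of its own; the     */
/*    rte_io_wmb() above is the only thing ordering steps 1 and 3.  */
I40E_PCI_REG_WC_WRITE_RELAXED(txq->qtx_tail, tx_id);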
txq->tx_tail = 0;
/* Update the tx tail register */
- I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ I40E_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
PMD_INIT_FUNC_TRACE();
rxq = dev->data->rx_queues[rx_queue_id];
+ if (!rxq || !rxq->q_set) {
+ PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
+ rx_queue_id);
+ return -EINVAL;
+ }
+
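+ /* Not fatal: a deferred-start queue can still be started through
+  * this entry point; the log only flags the configuration.
+  */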
+ if (rxq->rx_deferred_start)
+ PMD_DRV_LOG(WARNING, "RX queue %u is deferred start",
+ rx_queue_id);
err = i40e_alloc_rx_queue_mbufs(rxq);
if (err) {
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
rxq = dev->data->rx_queues[rx_queue_id];
+ if (!rxq || !rxq->q_set) {
+ PMD_DRV_LOG(ERR, "RX queue %u is not available or not set up",
+ rx_queue_id);
+ return -EINVAL;
+ }
/*
* rx_queue_id is queue id application refers to, while
PMD_INIT_FUNC_TRACE();
txq = dev->data->tx_queues[tx_queue_id];
+ if (!txq || !txq->q_set) {
+ PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
+ tx_queue_id);
+ return -EINVAL;
+ }
+
+ if (txq->tx_deferred_start)
+ PMD_DRV_LOG(WARNING, "TX queue %u is deferred start",
+ tx_queue_id);
/*
* tx_queue_id is queue id application refers to, while
struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
txq = dev->data->tx_queues[tx_queue_id];
+ if (!txq || !txq->q_set) {
+ PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
+ tx_queue_id);
+ return -EINVAL;
+ }
/*
* tx_queue_id is queue id application refers to, while
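All four start/stop handlers above repeat the same guard; a
hypothetical helper (not part of the patch) shows the shared shape:

/* Return 0 when the queue exists and was set up, else -EINVAL.
 * Only the fields used by the guards above are assumed here.
 */
static int
i40e_tx_queue_sanity_check(struct i40e_tx_queue *txq, uint16_t tx_queue_id)
{
	if (!txq || !txq->q_set) {
		PMD_DRV_LOG(ERR, "TX queue %u is not available or not set up",
			    tx_queue_id);
		return -EINVAL;
	}
	return 0;
}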
continue;
i40e_dev_rx_queue_release(dev->data->rx_queues[i]);
dev->data->rx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "rx_ring", i);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
continue;
i40e_dev_tx_queue_release(dev->data->tx_queues[i]);
dev->data->tx_queues[i] = NULL;
+ rte_eth_dma_zone_free(dev, "tx_ring", i);
}
}
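Releasing the ring memzones mirrors the reservation made at queue
setup: both sides identify the zone by the same device/name/queue-id
triple. A sketch of the pairing; ring_size, queue_idx and socket_id
are placeholders:

/* queue setup: reserve (or re-find) the descriptor ring memzone */
const struct rte_memzone *rz;
rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
			      ring_size, I40E_RING_BASE_ALIGN,
			      socket_id);

/* queue free: drop the zone, so a later setup cannot silently
 * reuse a stale zone of the wrong size
 */
rte_eth_dma_zone_free(dev, "rx_ring", queue_idx);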
-#define I40E_FDIR_NUM_TX_DESC I40E_MIN_RING_DESC
-#define I40E_FDIR_NUM_RX_DESC I40E_MIN_RING_DESC
-
enum i40e_status_code
i40e_fdir_setup_tx_resources(struct i40e_pf *pf)
{
struct i40e_tx_queue *txq;
const struct rte_memzone *tz = NULL;
- uint32_t ring_size;
struct rte_eth_dev *dev;
+ uint32_t ring_size;
if (!pf) {
PMD_DRV_LOG(ERR, "PF is not available");
txq->tx_ring_phys_addr = tz->iova;
txq->tx_ring = (struct i40e_tx_desc *)tz->addr;
+
/*
* No software ring allocation or reset is needed for the FDIR
* program queue; just mark the queue as configured.
*/
txq->q_set = TRUE;
pf->fdir.txq = txq;
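+ /* All programming-packet buffers start out available. */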
+ pf->fdir.txq_available_buf_count = I40E_FDIR_PRG_PKT_CNT;
return I40E_SUCCESS;
}
i40e_get_latest_rx_vec(bool scatter)
{
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
i40e_recv_pkts_vec_avx2;
#endif
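Both selectors now honour the EAL's runtime SIMD limit as well as the
CPU flags. A minimal sketch of the combined test as a hypothetical
helper (the limit defaults to the platform maximum and can be lowered
with the --force-max-simd-bitwidth EAL option):

#include <stdbool.h>
#include <rte_cpuflags.h>
#include <rte_vect.h>

/* True only if the CPU supports the ISA *and* the EAL limit allows
 * vectors of the requested width.
 */
static inline bool
i40e_simd_width_allowed(enum rte_cpu_flag_t flag, uint16_t width)
{
	return rte_cpu_get_flag_enabled(flag) &&
	       rte_vect_get_max_simd_bitwidth() >= width;
}

/* e.g.: i40e_simd_width_allowed(RTE_CPUFLAG_AVX2, RTE_VECT_SIMD_256) */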
* use of AVX2 version to later platforms, not all those that could
* theoretically run it.
*/
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
i40e_recv_pkts_vec_avx2;
#endif
i40e_recv_pkts_vec;
}
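A note on the attribute swap below: __rte_cold is the portable macro
from rte_common.h; on GCC and Clang it expands to the attribute it
replaces, roughly:

#define __rte_cold __attribute__((cold))

Setup-time functions such as these run once per device configuration,
so the cold hint lets the compiler optimize them for size and keep
them out of the hot Rx/Tx code.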
-void __attribute__((cold))
+void __rte_cold
i40e_set_rx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
}
}
- if (ad->rx_vec_allowed) {
+ if (ad->rx_vec_allowed &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
/* Vec Rx path */
PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
dev->data->port_id);
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
struct i40e_adapter *ad =
i40e_get_latest_tx_vec(void)
{
#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return i40e_xmit_pkts_vec_avx2;
#endif
return i40e_xmit_pkts_vec;
* use of AVX2 version to later platforms, not all those that could
* theoretically run it.
*/
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
return i40e_xmit_pkts_vec_avx2;
#endif
return i40e_xmit_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
}
if (ad->tx_simple_allowed) {
- if (ad->tx_vec_allowed) {
+ if (ad->tx_vec_allowed &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
if (ad->use_latest_vec)
dev->tx_pkt_burst =
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_pctype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
}
#ifndef RTE_LIBRTE_I40E_INC_VECTOR
-/* Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set to 'n' */
int
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{