#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_udp.h>
static inline bool
check_tx_vec_allow(struct avf_tx_queue *txq)
{
- if ((txq->txq_flags & AVF_SIMPLE_FLAGS) == AVF_SIMPLE_FLAGS &&
+ if (!(txq->offloads & AVF_NO_VECTOR_FLAGS) &&
txq->rs_thresh >= AVF_VPMD_TX_MAX_BURST &&
txq->rs_thresh <= AVF_VPMD_TX_MAX_FREE_BUF) {
PMD_INIT_LOG(DEBUG, "Vector tx can be enabled on this txq.");
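The rewritten check gates the vector Tx path on the per-queue offload bitmap instead of the old txq_flags. As a reference point only, AVF_NO_VECTOR_FLAGS is presumably defined in avf_rxtx.h along these lines (a guess at its contents, not quoted from this patch):

/* Sketch (assumption): Tx offloads the scalar path must handle, so the
 * vector path is disabled whenever any of them is requested. The real
 * definition in avf_rxtx.h may differ. */
#define AVF_NO_VECTOR_FLAGS (				\
		DEV_TX_OFFLOAD_MULTI_SEGS |		\
		DEV_TX_OFFLOAD_VLAN_INSERT |		\
		DEV_TX_OFFLOAD_SCTP_CKSUM |		\
		DEV_TX_OFFLOAD_UDP_CKSUM |		\
		DEV_TX_OFFLOAD_TCP_TSO |		\
		DEV_TX_OFFLOAD_TCP_CKSUM)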
static inline void
release_rxq_mbufs(struct avf_rx_queue *rxq)
{
- struct rte_mbuf *mbuf;
uint16_t i;
if (!rxq->sw_ring)
struct avf_rx_queue *rxq;
const struct rte_memzone *mz;
uint32_t ring_size;
- uint16_t len, i;
+ uint16_t len;
uint16_t rx_free_thresh;
- uint16_t base, bsf, tc_mapping;
PMD_INIT_FUNC_TRACE();
const struct rte_eth_txconf *tx_conf)
{
struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct avf_adapter *ad =
- AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
struct avf_tx_queue *txq;
const struct rte_memzone *mz;
uint32_t ring_size;
uint16_t tx_rs_thresh, tx_free_thresh;
- uint16_t i, base, bsf, tc_mapping;
+ uint64_t offloads;
PMD_INIT_FUNC_TRACE();
+ offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
+
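The added line computes the union of per-queue and per-port Tx offloads, which is how the new offloads API is meant to be consumed by a PMD. A hedged application-side sketch of the same API, for context (the ethdev calls are the real API of this era; port_id, nb_desc and the particular flags are illustrative only):

/* Hypothetical usage sketch, not part of this patch. */
uint16_t port_id = 0;
uint16_t nb_desc = 512;
struct rte_eth_conf dev_conf = { 0 };
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf txconf;

rte_eth_dev_info_get(port_id, &dev_info);
/* Port-wide Tx offloads go through dev_conf.txmode.offloads ... */
dev_conf.txmode.offloads = DEV_TX_OFFLOAD_IPV4_CKSUM;
rte_eth_dev_configure(port_id, 1, 1, &dev_conf);

/* ... per-queue ones through txconf.offloads; the PMD sees the union,
 * exactly as the "offloads = ..." line above computes. */
txconf = dev_info.default_txconf;
txconf.offloads = DEV_TX_OFFLOAD_MULTI_SEGS;
rte_eth_tx_queue_setup(port_id, 0, nb_desc, rte_socket_id(), &txconf);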
if (nb_desc % AVF_ALIGN_RING_DESC != 0 ||
nb_desc > AVF_MAX_RING_DESC ||
nb_desc < AVF_MIN_RING_DESC) {
txq->free_thresh = tx_free_thresh;
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->txq_flags = tx_conf->txq_flags;
+ txq->offloads = offloads;
txq->tx_deferred_start = tx_conf->tx_deferred_start;
/* Allocate software ring */
txq->ops = &def_txq_ops;
#ifdef RTE_LIBRTE_AVF_INC_VECTOR
- if (check_tx_vec_allow(txq) == FALSE)
+ if (check_tx_vec_allow(txq) == FALSE) {
+ struct avf_adapter *ad =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
ad->tx_vec_allowed = false;
+ }
#endif
return 0;
rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct avf_rx_queue *rxq = (struct avf_rx_queue *)rx_queue;
- struct rte_eth_dev *dev;
uint16_t nb_rx = 0;
if (!nb_pkts)
if (nb_ctx) {
/* Setup TX context descriptor if required */
- volatile struct avf_tx_context_desc *ctx_txd =
- (volatile struct avf_tx_context_desc *)
- &txr[tx_id];
- uint16_t cd_l2tag2 = 0;
uint64_t cd_type_cmd_tso_mss =
AVF_TX_DESC_DTYPE_CONTEXT;
cd_type_cmd_tso_mss |=
avf_set_tso_ctx(tx_pkt, tx_offload);
- AVF_DUMP_TX_DESC(txq, ctx_txd, tx_id);
+ AVF_DUMP_TX_DESC(txq, &txr[tx_id], tx_id);
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
qinfo->conf.tx_free_thresh = txq->free_thresh;
qinfo->conf.tx_rs_thresh = txq->rs_thresh;
- qinfo->conf.txq_flags = txq->txq_flags;
+ qinfo->conf.offloads = txq->offloads;
qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}
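With txq_flags gone, queue introspection reports the offload bitmap instead. A hedged sketch of how an application would read it back (assumed usage, not taken from this patch; port_id is illustrative):

/* Sketch: query the queue config and test one offload bit. */
struct rte_eth_txq_info qinfo;

if (rte_eth_tx_queue_info_get(port_id, 0, &qinfo) == 0 &&
    (qinfo.conf.offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
	printf("multi-segment Tx enabled on queue 0\n");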
return RTE_ETH_TX_DESC_FULL;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_recv_pkts_vec(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_recv_scattered_pkts_vec(__rte_unused void *rx_queue,
__rte_unused struct rte_mbuf **rx_pkts,
__rte_unused uint16_t nb_pkts)
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
avf_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
__rte_unused struct rte_mbuf **tx_pkts,
__rte_unused uint16_t nb_pkts)
return 0;
}
-int __attribute__((weak))
+__rte_weak int
avf_rxq_vec_setup(__rte_unused struct avf_rx_queue *rxq)
{
return -1;
}
-int __attribute__((weak))
+__rte_weak int
avf_txq_vec_setup(__rte_unused struct avf_tx_queue *txq)
{
return -1;
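These stubs keep the scalar-only build linkable when the vector sources are not compiled in; the strong definitions in the vector files override them at link time. For reference, __rte_weak is the common DPDK macro and, to the best of recollection, expands as sketched below in rte_common.h (paraphrased assumption, not part of this patch):

/* Assumed definition from rte_common.h: wraps the compiler's
 * weak-symbol attribute so the stubs above can be overridden. */
#define __rte_weak __attribute__((__weak__))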