return nb_tx;
}
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
static uint16_t
ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
return nb_tx;
}
-#endif
static inline void
ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq,
return i;
}
+	/* reject packets shorter than IXGBE_TX_MIN_PKT_LEN */
+ if (m->pkt_len < IXGBE_TX_MIN_PKT_LEN) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
*
**********************************************************************/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
return ixgbe_tx_done_cleanup_full(txq, free_cnt);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
rte_free(txq->sw_ring);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
(txq->tx_rs_thresh >= RTE_PMD_IXGBE_TX_MAX_BURST)) {
PMD_INIT_LOG(DEBUG, "Using simple tx code path");
dev->tx_pkt_prepare = NULL;
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
if (txq->tx_rs_thresh <= RTE_IXGBE_TX_MAX_FREE_BUF_SZ &&
(rte_eal_process_type() != RTE_PROC_PRIMARY ||
ixgbe_txq_vec_setup(txq) == 0)) {
PMD_INIT_LOG(DEBUG, "Vector tx enabled.");
dev->tx_pkt_burst = ixgbe_xmit_pkts_vec;
} else
-#endif
dev->tx_pkt_burst = ixgbe_xmit_pkts_simple;
} else {
PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
return tx_offload_capa;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
*
* @m scattered cluster head
*/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint16_t i, nb_segs = m->nb_segs;
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
-#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/* SSE Vector driver has a different way of releasing mbufs. */
if (rxq->rx_using_sse) {
ixgbe_rx_queue_release_mbufs_vec(rxq);
return;
}
-#endif
if (rxq->sw_ring != NULL) {
for (i = 0; i < rxq->nb_rx_desc; i++) {
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int __attribute__((cold))
+static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
{
int ret = 0;
}
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
return offloads;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
/*
* Set up link loopback for X540/X550 mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
{
uint32_t macc;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
IXGBE_WRITE_FLUSH(hw);
}
-static int __attribute__((cold))
+static int __rte_cold
ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i, rx_using_sse;
/*
* Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
/*
* Start Transmit and Receive Units.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Start Transmit and Receive Units.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
{
return -1;
}
+
+/*
+ * Stub used when the vector Tx path is not compiled in:
+ * transmits nothing and reports 0 packets sent.
+ */
+uint16_t
+ixgbe_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+/*
+ * Stub used when the vector Tx path is not compiled in:
+ * always returns -1 so ixgbe_set_tx_function() falls back to the
+ * non-vector Tx burst routine.
+ */
+int
+ixgbe_txq_vec_setup(struct ixgbe_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+/*
+ * Stub used when the vector Rx path is not compiled in:
+ * no vector-specific mbufs exist, so there is nothing to release.
+ */
+void
+ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue __rte_unused *rxq)
+{
+}
#endif