return nb_sent;
}
-int __attribute__((cold))
+int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
uintptr_t p;
return nb_tx;
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
struct fm10k_tx_queue *txq;
}
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
struct fm10k_dev_info *dev_info =
#define fm10k_desc_to_pktype_v(desc, rx_pkts) do {} while (0)
#endif
-int __attribute__((cold))
+int __rte_cold
fm10k_rx_vec_condition_check(struct rte_eth_dev *dev)
{
#ifndef RTE_LIBRTE_IEEE1588
#endif
}
-int __attribute__((cold))
+int __rte_cold
fm10k_rxq_vec_setup(struct fm10k_rx_queue *rxq)
{
uintptr_t p;
FM10K_PCI_REG_WRITE(rxq->tail_ptr, rx_id);
}
-void __attribute__((cold))
+void __rte_cold
fm10k_rx_queue_release_mbufs_vec(struct fm10k_rx_queue *rxq)
{
const unsigned mask = rxq->nb_desc - 1;
.reset = fm10k_reset_tx_queue,
};
-void __attribute__((cold))
+void __rte_cold
fm10k_txq_vec_setup(struct fm10k_tx_queue *txq)
{
txq->ops = &vec_txq_ops;
}
-int __attribute__((cold))
+int __rte_cold
fm10k_tx_vec_condition_check(struct fm10k_tx_queue *txq)
{
/* Vector TX can't offload any features yet */
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
fm10k_reset_tx_queue(struct fm10k_tx_queue *txq)
{
static const struct fm10k_tx_desc zeroed_desc = {0};
i40e_recv_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_rx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
{
struct i40e_adapter *ad =
return i40e_xmit_pkts_vec;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_tx_function(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
return ret;
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
}
-void __attribute__((cold))
+void __rte_cold
i40e_set_default_pctype_table(struct rte_eth_dev *dev)
{
struct i40e_adapter *ad =
return nb_pkts;
}
-void __attribute__((cold))
+void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
_i40e_rx_queue_release_mbufs_vec(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
return i40e_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused * txq)
{
return 0;
}
-int __attribute__((cold))
+int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return i40e_rx_vec_dev_conf_condition_check_default(dev);
return nb_pkts;
}
-void __attribute__((cold))
+void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
_i40e_rx_queue_release_mbufs_vec(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
return i40e_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
-int __attribute__((cold))
+int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return i40e_rx_vec_dev_conf_condition_check_default(dev);
return nb_pkts;
}
-void __attribute__((cold))
+void __rte_cold
i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
{
_i40e_rx_queue_release_mbufs_vec(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
{
return i40e_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
{
return 0;
}
-int __attribute__((cold))
+int __rte_cold
i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return i40e_rx_vec_dev_conf_condition_check_default(dev);
return nb_tx;
}
-static void __attribute__((cold))
+static void __rte_cold
iavf_rx_queue_release_mbufs_sse(struct iavf_rx_queue *rxq)
{
_iavf_rx_queue_release_mbufs_vec(rxq);
}
-static void __attribute__((cold))
+static void __rte_cold
iavf_tx_queue_release_mbufs_sse(struct iavf_tx_queue *txq)
{
_iavf_tx_queue_release_mbufs_vec(txq);
.release_mbufs = iavf_tx_queue_release_mbufs_sse,
};
-int __attribute__((cold))
+int __rte_cold
iavf_txq_vec_setup(struct iavf_tx_queue *txq)
{
txq->ops = &sse_vec_txq_ops;
return 0;
}
-int __attribute__((cold))
+int __rte_cold
iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
{
rxq->ops = &sse_vec_rxq_ops;
return iavf_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
iavf_rx_vec_dev_check(struct rte_eth_dev *dev)
{
return iavf_rx_vec_dev_check_default(dev);
}
-int __attribute__((cold))
+int __rte_cold
iavf_tx_vec_dev_check(struct rte_eth_dev *dev)
{
return iavf_tx_vec_dev_check_default(dev);
return nb_tx;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_rx_function(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
return ret;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function_flag(struct rte_eth_dev *dev, struct ice_tx_queue *txq)
{
struct ice_adapter *ad =
return i;
}
-void __attribute__((cold))
+void __rte_cold
ice_set_tx_function(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =
return type_table[ptype];
}
-void __attribute__((cold))
+void __rte_cold
ice_set_default_ptype_table(struct rte_eth_dev *dev)
{
struct ice_adapter *ad =
return nb_tx;
}
-int __attribute__((cold))
+int __rte_cold
ice_rxq_vec_setup(struct ice_rx_queue *rxq)
{
if (!rxq)
return ice_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ice_txq_vec_setup(struct ice_tx_queue __rte_unused *txq)
{
if (!txq)
return 0;
}
-int __attribute__((cold))
+int __rte_cold
ice_rx_vec_dev_check(struct rte_eth_dev *dev)
{
return ice_rx_vec_dev_check_default(dev);
}
-int __attribute__((cold))
+int __rte_cold
ice_tx_vec_dev_check(struct rte_eth_dev *dev)
{
return ice_tx_vec_dev_check_default(dev);
qinfo->conf.tx_deferred_start = txq->deferred_start;
}
-static inline void __attribute__((cold))
+static inline void __rte_cold
ionic_tx_flush(struct ionic_cq *cq)
{
struct ionic_queue *q = cq->bound_q;
}
}
-void __attribute__((cold))
+void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
ionic_qcq_free(txq);
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
struct ionic_qcq *txq;
return 0;
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
uint16_t nb_desc, uint32_t socket_id __rte_unused,
const struct rte_eth_txconf *tx_conf)
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
struct ionic_qcq *txq;
qinfo->conf.offloads = rxq->offloads;
}
-static void __attribute__((cold))
+static void __rte_cold
ionic_rx_empty(struct ionic_queue *q)
{
struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
}
}
-void __attribute__((cold))
+void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
ionic_qcq_free(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
uint16_t rx_queue_id,
uint16_t nb_desc,
ionic_q_post(q, true, ionic_rx_clean, mbuf);
}
-static int __attribute__((cold))
+static int __rte_cold
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
struct ionic_queue *q = &rxq->q;
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
return 0;
}
-static inline void __attribute__((cold))
+static inline void __rte_cold
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
void *service_cb_arg)
{
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
struct ionic_qcq *rxq;
*
**********************************************************************/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs(struct ixgbe_tx_queue *txq)
{
unsigned i;
return ixgbe_tx_done_cleanup_full(txq, free_cnt);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
if (txq != NULL &&
rte_free(txq->sw_ring);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release(struct ixgbe_tx_queue *txq)
{
if (txq != NULL && txq->ops != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_queue_release(void *txq)
{
ixgbe_tx_queue_release(txq);
}
/* (Re)set dynamic ixgbe_tx_queue fields to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
static const union ixgbe_adv_tx_desc zeroed_desc = {{0}};
* the queue parameters. Used in tx_queue_setup by primary process and then
* in dev_init by secondary process when attaching to an existing ethdev.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_tx_function(struct rte_eth_dev *dev, struct ixgbe_tx_queue *txq)
{
/* Use a simple Tx queue (no offloads, no multi segs) if possible */
return tx_offload_capa;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
*
* @m scattered cluster head
*/
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_free_sc_cluster(struct rte_mbuf *m)
{
uint16_t i, nb_segs = m->nb_segs;
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq)
{
unsigned i;
}
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_rx_queue_release(struct ixgbe_rx_queue *rxq)
{
if (rxq != NULL) {
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_rx_queue_release(void *rxq)
{
ixgbe_rx_queue_release(rxq);
* -EINVAL: the preconditions are NOT satisfied and the default Rx burst
* function must be used.
*/
-static inline int __attribute__((cold))
+static inline int __rte_cold
check_rx_burst_bulk_alloc_preconditions(struct ixgbe_rx_queue *rxq)
{
int ret = 0;
}
/* Reset dynamic ixgbe_rx_queue fields back to defaults */
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_rx_queue *rxq)
{
static const union ixgbe_adv_rx_desc zeroed_desc = {{0}};
return offloads;
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
uint16_t queue_idx,
uint16_t nb_desc,
/*
* Set up link loopback for X540/X550 mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_x540_x550(struct ixgbe_hw *hw, bool enable)
{
uint32_t macc;
IXGBE_WRITE_REG(hw, IXGBE_MACC, macc);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
unsigned i;
IXGBE_WRITE_FLUSH(hw);
}
-static int __attribute__((cold))
+static int __rte_cold
ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq)
{
struct ixgbe_rx_entry *rxe = rxq->sw_ring;
}
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_set_rx_function(struct rte_eth_dev *dev)
{
uint16_t i, rx_using_sse;
/*
* Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbe_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Set up link for 82599 loopback mode Tx->Rx.
*/
-static inline void __attribute__((cold))
+static inline void __rte_cold
ixgbe_setup_loopback_link_82599(struct ixgbe_hw *hw)
{
PMD_INIT_FUNC_TRACE();
/*
* Start Transmit and Receive Units.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* Start Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Receive Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Start Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* Stop Transmit Units for specified queue.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Receive Unit.
*/
-int __attribute__((cold))
+int __rte_cold
ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Initializes Transmit Unit.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
/*
* [VF] Start Transmit and Receive Units.
*/
-void __attribute__((cold))
+void __rte_cold
ixgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw;
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_queue_release_mbufs_vec(txq);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
_ixgbe_rx_queue_release_mbufs_vec(rxq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_free_swring_vec(txq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
_ixgbe_reset_tx_queue_vec(txq);
.reset = ixgbe_reset_tx_queue,
};
-int __attribute__((cold))
+int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
return ixgbe_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
return nb_pkts;
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_queue_release_mbufs_vec(txq);
}
-void __attribute__((cold))
+void __rte_cold
ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq)
{
_ixgbe_rx_queue_release_mbufs_vec(rxq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq)
{
_ixgbe_tx_free_swring_vec(txq);
}
-static void __attribute__((cold))
+static void __rte_cold
ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq)
{
_ixgbe_reset_tx_queue_vec(txq);
.reset = ixgbe_reset_tx_queue,
};
-int __attribute__((cold))
+int __rte_cold
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq)
{
return ixgbe_rxq_vec_setup_default(rxq);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq)
{
return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops);
}
-int __attribute__((cold))
+int __rte_cold
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
{
return ixgbe_rx_vec_dev_conf_condition_check_default(dev);
* @return
* 1 if supported, negative errno value if not.
*/
-int __attribute__((cold))
+int __rte_cold
mlx5_rxq_check_vec_support(struct mlx5_rxq_data *rxq)
{
struct mlx5_rxq_ctrl *ctrl =
* @return
* 1 if supported, negative errno value if not.
*/
-int __attribute__((cold))
+int __rte_cold
mlx5_check_vec_rx_support(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
-int __attribute__((cold))
+int __rte_cold
virtio_rxq_vec_setup(struct virtnet_rx *rxq)
{
uintptr_t p;
*/
#define __rte_hot __attribute__((hot))
+/**
+ * Hint function in the cold path
+ */
+#define __rte_cold __attribute__((cold))
+
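/*
 * Illustrative sketch, not part of the patch: how a driver is expected
 * to use the new __rte_cold macro.  Marking a configuration-time
 * function cold lets GCC/Clang optimize it for size and place it away
 * from the hot Rx/Tx burst path, improving instruction-cache locality.
 * The names below (demo_rx_queue, demo_rxq_vec_setup) are hypothetical;
 * rte_is_power_of_2() and __rte_cold come from rte_common.h.
 */
#include <stdint.h>
#include <errno.h>

#include <rte_common.h>

struct demo_rx_queue {
	uint16_t nb_desc;	/* ring size, expected to be a power of two */
};

/* Queue setup runs once at device configuration time: a cold path. */
static int __rte_cold
demo_rxq_vec_setup(struct demo_rx_queue *rxq, uint16_t nb_desc)
{
	if (rxq == NULL || !rte_is_power_of_2(nb_desc))
		return -EINVAL;
	rxq->nb_desc = nb_desc;
	return 0;
}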
/*********** Macros for pointer arithmetic ********/
/**
void __rte_panic(const char *funcname , const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
- __attribute__((cold))
+ __rte_cold
#endif
#endif
__attribute__((noreturn))
int rte_log(uint32_t level, uint32_t logtype, const char *format, ...)
#ifdef __GNUC__
#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ > 2))
- __attribute__((cold))
+ __rte_cold
#endif
#endif
__rte_format_printf(3, 4);
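/*
 * Illustrative sketch, not part of the patch: the per-declaration GNUC
 * version guards above are kept by this change, but centralizing the
 * compiler check inside the macro itself would let a later cleanup drop
 * them.  A hypothetical fallback definition for a compiler without
 * __attribute__((cold)) could look like this; upstream rte_common.h
 * simply assumes a GCC-compatible compiler and defines __rte_cold
 * unconditionally.
 */
#ifndef __rte_cold
#if defined(__GNUC__) || defined(__clang__)
#define __rte_cold __attribute__((cold))
#else
#define __rte_cold
#endif
#endif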