#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif
-#ifndef __hot
-#define __hot __attribute__((hot))
-#endif
-
#define MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
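(Aside, not part of the patch: MOD_INC is a wrap-around increment for ring indices; with l = 4 the index sequence is 0, 1, 2, 3, 0, ..., avoiding a modulo on the fast path.)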
struct cpt_qp_meta_info {
return prep_req;
}
-static __rte_always_inline void *__hot
+static __rte_always_inline void *__rte_hot
cpt_fc_enc_hmac_prep(uint32_t flags, uint64_t d_offs, uint64_t d_lens,
fc_params_t *fc_params, void *op)
{
cpt_fill_req_comp_addr(req, caddr);
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
cpt_enqueue_rsa_op(struct rte_crypto_op *op,
struct asym_op_params *params,
struct cpt_asym_sess_misc *sess)
cpt_fill_req_comp_addr(req, caddr);
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
cpt_enqueue_ecdsa_op(struct rte_crypto_op *op,
struct asym_op_params *params,
struct cpt_asym_sess_misc *sess,
int dpaax_iova_table_update(phys_addr_t paddr, void *vaddr, size_t length);
void dpaax_iova_table_dump(void);
-static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __attribute__((hot));
+static inline void *dpaax_iova_table_get_va(phys_addr_t paddr) __rte_hot;
static inline void *
dpaax_iova_table_get_va(phys_addr_t paddr) {
(~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
#endif
-/* Compiler attributes */
-#ifndef __hot
-#define __hot __attribute__((hot))
-#endif
-
/* Intra device related functions */
struct otx2_npa_lf;
struct otx2_idev_cfg {
}
}
-static void __attribute__((hot))
+static void __rte_hot
dpaa2_sec_process_parallel_event(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
rte_mempool_put(sess_mp, priv);
}
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
otx_cpt_request_enqueue(struct cpt_instance *instance,
struct pending_queue *pqueue,
void *req)
return 0;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single_asym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
return ret;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
return 0;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single_sym_sessless(struct cpt_instance *instance,
struct rte_crypto_op *op,
struct pending_queue *pqueue)
#define OP_TYPE_SYM 0
#define OP_TYPE_ASYM 1
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx_cpt_enq_single(struct cpt_instance *inst,
struct rte_crypto_op *op,
struct pending_queue *pqueue,
return -ENOTSUP;
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_enqueue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
{
ecpm->r.y.length = prime_len;
}
-static __rte_always_inline void __hot
+static __rte_always_inline void __rte_hot
otx_cpt_asym_post_process(struct rte_crypto_op *cop,
struct cpt_request_info *req)
{
}
}
-static __rte_always_inline void __hot
+static __rte_always_inline void __rte_hot
otx_cpt_dequeue_post_process(struct rte_crypto_op *cop, uintptr_t *rsp,
const uint8_t op_type)
{
return;
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
otx_cpt_pkt_dequeue(void *qptr, struct rte_crypto_op **ops, uint16_t nb_ops,
const uint8_t op_type)
{
rte_mempool_put(pool, priv);
}
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_req(const struct otx2_cpt_qp *qp,
struct pending_queue *pend_q,
struct cpt_request_info *req)
return 0;
}
-static __rte_always_inline int32_t __hot
+static __rte_always_inline int32_t __rte_hot
otx2_cpt_enqueue_asym(struct otx2_cpt_qp *qp,
struct rte_crypto_op *op,
struct pending_queue *pend_q)
return ret;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct pending_queue *pend_q)
{
return ret;
}
-static __rte_always_inline int __hot
+static __rte_always_inline int __rte_hot
otx2_cpt_enqueue_sym_sessless(struct otx2_cpt_qp *qp, struct rte_crypto_op *op,
struct pending_queue *pend_q)
{
ssows_swtag_untag(ws);
}
-__rte_always_inline uint16_t __hot
+__rte_always_inline uint16_t __rte_hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
}
}
-__rte_always_inline uint16_t __hot
+__rte_always_inline uint16_t __rte_hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
struct ssows *ws = port;
return ret;
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
uint64_t timeout_ticks)
{
return ssows_deq(port, ev, timeout_ticks);
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
uint64_t timeout_ticks)
{
return ssows_deq_timeout(port, ev, timeout_ticks);
}
-__rte_always_inline uint16_t __hot
+__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
{
struct ssows *ws = port;
return ret;
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
RTE_SET_USED(nb_events);
return ssows_enq(port, ev);
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
uint16_t i;
return nb_events;
}
-uint16_t __hot
+uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
struct ssows *ws = port;
SSO_SYNC_EMPTY
};
-#ifndef __hot
-#define __hot __attribute__((hot))
-#endif
-
/* SSO Operations */
static __rte_always_inline struct rte_mbuf *
}
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
return ret; \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],\
uint16_t nb_events, \
uint64_t timeout_ticks) \
return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks);\
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
ws->lookup_mem); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
return ret; \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_enq(void *port, const struct rte_event *ev)
{
struct otx2_ssogws *ws = port;
return 1;
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
return otx2_ssogws_enq(port, ev);
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
return nb_events;
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
}
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[], \
uint16_t nb_events) \
{ \
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
uint16_t nb_events) \
{ \
}
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_dual_enq(void *port, const struct rte_event *ev)
{
struct otx2_ssogws_dual *ws = port;
return 1;
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
return otx2_ssogws_dual_enq(port, ev);
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
return nb_events;
}
-uint16_t __hot
+uint16_t __rte_hot
otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
uint16_t nb_events)
{
}
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
return gw; \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks) \
return otx2_ssogws_dual_deq_ ##name(port, ev, timeout_ticks); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_timeout_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
return gw; \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
timeout_ticks); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks) \
{ \
return gw; \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
timeout_ticks); \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks) \
return gw; \
} \
\
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
#undef R
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events) \
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-uint16_t __hot \
+uint16_t __rte_hot \
otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events) \
#include "otx2_mempool.h"
-static int __hot
+static int __rte_hot
otx2_npa_enq(struct rte_mempool *mp, void * const *obj_table, unsigned int n)
{
unsigned int index; const uint64_t aura_handle = mp->pool_id;
}
}
-static __rte_noinline int __hot
+static __rte_noinline int __rte_hot
otx2_npa_deq_arm64(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
const int64_t wdata = npa_lf_aura_handle_to_aura(mp->pool_id);
#else
-static inline int __hot
+static inline int __rte_hot
otx2_npa_deq(struct rte_mempool *mp, void **obj_table, unsigned int n)
{
const int64_t wdata = npa_lf_aura_handle_to_aura(mp->pool_id);
#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"
-static inline uint32_t __attribute__((hot))
+static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
struct dpaa2_annot_hdr *annotation);
DPAA2_RESET_FD_FLC(_fd); \
} while (0)
-static inline void __attribute__((hot))
+static inline void __rte_hot
dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd)
{
struct dpaa2_annot_hdr *annotation;
frc, m->packet_type, m->ol_flags);
}
-static inline uint32_t __attribute__((hot))
+static inline uint32_t __rte_hot
dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
struct dpaa2_annot_hdr *annotation)
{
return pkt_type;
}
-static inline uint32_t __attribute__((hot))
+static inline uint32_t __rte_hot
dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
{
struct dpaa2_annot_hdr *annotation =
return dpaa2_dev_rx_parse_slow(mbuf, annotation);
}
-static inline struct rte_mbuf *__attribute__((hot))
+static inline struct rte_mbuf *__rte_hot
eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
int port_id)
{
return (void *)first_seg;
}
-static inline struct rte_mbuf *__attribute__((hot))
+static inline struct rte_mbuf *__rte_hot
eth_fd_to_mbuf(const struct qbman_fd *fd,
int port_id)
{
return mbuf;
}
-static int __rte_noinline __attribute__((hot))
+static int __rte_noinline __rte_hot
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
{
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid) __rte_unused;
-static void __rte_noinline __attribute__((hot))
+static void __rte_noinline __rte_hot
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
{
}
}
-static inline int __attribute__((hot))
+static inline int __rte_hot
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
struct qbman_fd *fd, uint16_t bpid)
{
return num_rx;
}
-void __attribute__((hot))
+void __rte_hot
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
qbman_swp_dqrr_consume(swp, dq);
}
-void __attribute__((hot))
+void __rte_hot
dpaa2_dev_process_atomic_event(struct qbman_swp *swp __rte_unused,
const struct qbman_fd *fd,
const struct qbman_result *dq,
DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
-void __attribute__((hot))
+void __rte_hot
dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
const struct qbman_fd *fd,
const struct qbman_result *dq,
}
-static inline void __attribute__((hot))
+static inline void __rte_hot
enetc_dev_rx_parse(struct rte_mbuf *m, uint16_t parse_results)
{
ENETC_PMD_DP_DEBUG("parse summary = 0x%x ", parse_results);
#include "octeontx_rxtx.h"
#include "octeontx_logs.h"
-uint16_t __hot
+uint16_t __rte_hot
octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int count;
return count; /* return number of pkts transmitted */
}
-uint16_t __hot
+uint16_t __rte_hot
octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct octeontx_rxq *rxq;
#include <rte_ethdev_driver.h>
-#ifndef __hot
-#define __hot __attribute__((hot))
-#endif
-
/* Packet type table */
#define PTYPE_SIZE OCCTX_PKI_LTYPE_LAST
#endif
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags) \
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_recv_pkts_ ## name(void *rx_queue, \
struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
return nix_recv_pkts(rx_queue, rx_pkts, pkts, (flags)); \
} \
\
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_recv_pkts_mseg_ ## name(void *rx_queue, \
struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
(flags) | NIX_RX_MULTI_SEG_F); \
} \
\
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_recv_pkts_vec_ ## name(void *rx_queue, \
struct rte_mbuf **rx_pkts, uint16_t pkts) \
{ \
#endif
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_xmit_pkts_ ## name(void *tx_queue, \
struct rte_mbuf **tx_pkts, uint16_t pkts) \
{ \
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue, \
struct rte_mbuf **tx_pkts, uint16_t pkts) \
{ \
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags) \
-static uint16_t __rte_noinline __hot \
+static uint16_t __rte_noinline __rte_hot \
otx2_nix_xmit_pkts_vec_ ## name(void *tx_queue, \
struct rte_mbuf **tx_pkts, uint16_t pkts) \
{ \
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"
-static inline void __hot
+static inline void __rte_hot
fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
/* Local variable sqe to avoid read from sq desc memory */
entry->buff[0] = sqe.buff[0];
}
-static inline void __hot
+static inline void __rte_hot
fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
struct rte_mbuf *pkt)
{
entry->buff[1] = 0ULL;
}
-void __hot
+void __rte_hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
int j = 0;
NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}
-void __hot
+void __rte_hot
nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
uint32_t n = 0;
NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
nicvf_free_tx_desc(struct nicvf_txq *sq)
{
return ((sq->head - sq->tail - 1) & sq->qlen_mask);
/* Send Header + Packet */
#define TX_DESC_PER_PKT 2
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
return free_desc;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
int i;
return i;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts)
{
[L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
};
-static inline uint32_t __hot
+static inline uint32_t __rte_hot
nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
{
return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}
-static inline uint64_t __hot
+static inline uint64_t __rte_hot
nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
{
static const uint64_t flag_table[3] __rte_cache_aligned = {
return flag_table[idx];
}
-static inline int __hot
+static inline int __rte_hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
int i;
return to_fill;
}
-static inline int32_t __hot
+static inline int32_t __rte_hot
nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
int32_t available_space)
{
return RTE_MIN(nb_pkts, available_space);
}
-static inline void __hot
+static inline void __rte_hot
nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
struct rte_mbuf *pkt)
{
return to_process;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_CKSUM);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
uint64_t mbuf_phys_off,
struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
return nb_segs;
}
-static __rte_always_inline uint16_t __hot
+static __rte_always_inline uint16_t __rte_hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts, const uint32_t flag)
{
return to_process;
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_CKSUM);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
{
NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
}
-uint16_t __hot
+uint16_t __rte_hot
nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
-#ifndef __hot
-#define __hot __attribute__((hot))
-#endif
-
#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
static inline uint16_t __attribute__((const))
nicvf_frag_num(uint16_t i)
return (i & ~3) + 3 - (i & 3);
}
-static inline void __hot
+static inline void __rte_hot
fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
/* Local variable sqe to avoid read from sq desc memory */
return i;
}
-static inline void __hot
+static inline void __rte_hot
fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 |
*/
#define __rte_noinline __attribute__((noinline))
+/**
+ * Hint that a function is in the hot path
+ */
+#define __rte_hot __attribute__((hot))
+
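For illustration, a minimal usage sketch (not part of the patch; the function name and body are hypothetical stubs) of how a driver fast-path handler uses the new macro:

    #include <rte_common.h>  /* __rte_hot, RTE_SET_USED */
    #include <rte_mbuf.h>    /* struct rte_mbuf */

    /* Hypothetical burst-receive stub: __rte_hot hints GCC/Clang to optimize
     * the function more aggressively and to place it alongside other hot code
     * for better instruction-cache locality on the datapath. */
    static uint16_t __rte_hot
    example_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
    {
            RTE_SET_USED(rx_queue);
            RTE_SET_USED(rx_pkts);
            RTE_SET_USED(nb_pkts);
            return 0; /* stub: no packets received */
    }

Because the macro now lives in the common header next to __rte_noinline, drivers pick up one shared definition instead of each maintaining its own "#ifndef __hot" fallback, as the removals above show.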
/*********** Macros for pointer arithmetic ********/
/**