/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_atq_process_stage_0(struct rte_event *const ev)
{
ev->sub_event_type = 1; /* move to stage 1 (atomic) on the same queue */
return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}
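Every hunk in this patch makes the same mechanical substitution: the open-coded GCC attribute is replaced by the __rte_always_inline macro from rte_common.h. From memory the wrapper is defined roughly as follows (treat the exact spelling as an assumption and check rte_common.h in the tree):

	/*
	 * Sketch of the helper provided by rte_common.h; shown only to make
	 * the substitution in the hunks self-explanatory.
	 */
	#define __rte_always_inline inline __attribute__((always_inline))

Centralising the attribute in one macro keeps the spelling consistent across the tree and gives a single place to adjust it for compilers that need a different syntax.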
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
struct rte_event *const ev, const uint32_t nb_flows,
uint32_t *const expected_flow_seq,
rte_atomic64_sub(outstand_pkts, 1);
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
struct rte_event *const ev)
{
/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
order_queue_process_stage_0(struct rte_event *const ev)
{
ev->queue_id = 1; /* q1 atomic queue */
rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}
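The two stage-0 helpers above only retarget the event (sub_event_type 1 on the same queue for the all-types-queue test, queue 1 for the queue test); the enqueue back to the device is done by the worker loop. A rough sketch of how such a loop might chain the helpers for the queue-based test follows; dev_id, port and the termination check are assumptions for illustration, while t, nb_flows, expected_flow_seq and outstand_pkts are the parameters shown in the hunks above:

	struct rte_event ev;

	while (t->err == false) {
		uint16_t nb = rte_event_dequeue_burst(dev_id, port, &ev, 1, 0);
		if (!nb)
			continue;

		if (ev.queue_id == 0) {
			/* stage 0: came from the ordered queue q0 */
			order_queue_process_stage_0(&ev); /* retarget to q1 */
			ev.op = RTE_EVENT_OP_FORWARD;
			rte_event_enqueue_burst(dev_id, port, &ev, 1);
		} else if (ev.queue_id == 1) {
			/* stage 1: atomic queue, check per-flow ordering */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}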
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
atq_mark_fwd_latency(struct rte_event *const ev)
{
if (unlikely(ev->sub_event_type == 0)) {
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
const uint8_t nb_stages)
{
printf("%s(): lcore %d dev_id %d port=%d\n", __func__,\
rte_lcore_id(), dev, port)
-static inline __attribute__((always_inline)) int
+static __rte_always_inline int
perf_process_last_stage(struct rte_mempool *const pool,
struct rte_event *const ev, struct worker_data *const w,
void *bufs[], int const buf_sz, uint8_t count)
return count;
}
-static inline __attribute__((always_inline)) uint8_t
+static __rte_always_inline uint8_t
perf_process_last_stage_latency(struct rte_mempool *const pool,
struct rte_event *const ev, struct worker_data *const w,
void *bufs[], int const buf_sz, uint8_t count)
return nb_prod * opt->nb_stages;
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
mark_fwd_latency(struct rte_event *const ev,
const uint8_t nb_stages)
{
}
}
-static inline __attribute__((always_inline)) void
+static __rte_always_inline void
fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
const uint8_t nb_stages)
{
return nb_tx;
}
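The perf test follows the same pattern: intermediate stages call fwd_event() to bump the event to the next stage and re-enqueue it, while the last stage hands the mbuf back through perf_process_last_stage(). A condensed, illustration-only sketch of one worker iteration (dev_id, port, pool, w, bufs, sz and cnt are assumed locals; the helpers are the ones patched above):

	struct rte_event ev;

	if (rte_event_dequeue_burst(dev_id, port, &ev, 1, 0)) {
		if (ev.queue_id == (uint8_t)(nb_stages - 1)) {
			/* last stage: buffer the mbuf for bulk free, count it */
			cnt = perf_process_last_stage(pool, &ev, w,
					bufs, sz, cnt);
		} else {
			/* intermediate stage: move the event to the next stage */
			fwd_event(&ev, sched_type_list, nb_stages);
			rte_event_enqueue_burst(dev_id, port, &ev, 1);
		}
	}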
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
ice_tx_free_bufs(struct ice_tx_queue *txq)
{
struct ice_tx_entry *txep;
#define RTE_XABORT_NESTED (1 << 5)
#define RTE_XABORT_CODE(x) (((x) >> 24) & 0xff)
-static __attribute__((__always_inline__)) inline
+static __rte_always_inline
unsigned int rte_xbegin(void)
{
unsigned int ret = RTE_XBEGIN_STARTED;
return ret;
}
-static __attribute__((__always_inline__)) inline
+static __rte_always_inline
void rte_xend(void)
{
asm volatile(".byte 0x0f,0x01,0xd5" ::: "memory");
asm volatile(".byte 0xc6,0xf8,%P0" :: "i" (status) : "memory"); \
} while (0)
-static __attribute__((__always_inline__)) inline
+static __rte_always_inline
int rte_xtest(void)
{
unsigned char out;
}
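rte_xbegin(), rte_xend() and rte_xtest() wrap Intel's RTM (TSX) instructions: rte_xbegin() returns RTE_XBEGIN_STARTED when a transaction is running, rte_xend() commits it, and rte_xtest() reports whether the CPU is currently inside one. A minimal, illustration-only usage sketch (the counter and fallback lock are not part of the patched code):

	static rte_spinlock_t fallback_lock = RTE_SPINLOCK_INITIALIZER;
	static uint64_t shared_counter;

	static void
	counter_inc(void)
	{
		if (rte_xbegin() == RTE_XBEGIN_STARTED) {
			shared_counter++;	/* transactional path */
			rte_xend();		/* commit */
		} else {
			/* transaction aborted: use the conventional lock */
			rte_spinlock_lock(&fallback_lock);
			shared_counter++;
			rte_spinlock_unlock(&fallback_lock);
		}
	}

Production lock-elision code additionally reads the fallback lock inside the transaction so the two paths stay mutually exclusive; see the *_tm() variants in rte_spinlock.h.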
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
exit_training_state(struct priority_worker *poll_stats)
{
RTE_SET_USED(poll_stats);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_training_state(struct priority_worker *poll_stats)
{
poll_stats->iter_counter = 0;
poll_stats->queue_state = TRAINING;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_normal_state(struct priority_worker *poll_stats)
{
/* Clear the averages arrays and strs */
poll_stats->thresh[HGH].threshold_percent = high_to_med_threshold;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_busy_state(struct priority_worker *poll_stats)
{
memset(poll_stats->edpi_av, 0, sizeof(poll_stats->edpi_av));
set_power_freq(poll_stats->lcore_id, HGH, false);
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
enter_purge_state(struct priority_worker *poll_stats)
{
poll_stats->iter_counter = 0;
poll_stats->queue_state = LOW_PURGE;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
set_state(struct priority_worker *poll_stats,
enum queue_state new_state)
{
}
}
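The enter_*() helpers above are per-state initialisers for the empty-poll worker; set_state() is the one place that moves a worker between them. A plausible shape for its body, not the library's verbatim code (only TRAINING and LOW_PURGE are visible in the hunks above; the remaining states are assumptions):

	if (new_state != poll_stats->queue_state) {
		switch (new_state) {
		case TRAINING:
			enter_training_state(poll_stats);
			break;
		case LOW_PURGE:
			enter_purge_state(poll_stats);
			break;
		/* the busy/normal states would call enter_busy_state()
		 * and enter_normal_state() in the same way */
		default:
			break;
		}
	}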
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
set_policy(struct priority_worker *poll_stats,
struct ep_policy *policy)
{
}
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
update_stats(struct priority_worker *poll_stats)
{
uint64_t tot_edpi = 0, tot_ppi = 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_stats_normal(struct priority_worker *poll_stats)
{
uint32_t percent;