/* constructor */
-void lthread_sched_ctor(void) __attribute__ ((constructor));
-void lthread_sched_ctor(void)
+RTE_INIT(lthread_sched_ctor)
{
memset(schedcore, 0, sizeof(schedcore));
rte_atomic16_init(&num_schedulers);
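
For context, RTE_INIT is the EAL helper from rte_common.h that wraps GCC's
constructor attribute, so the old prototype-plus-definition pair collapses
into a single declaration that still runs before main(). A minimal sketch of
the pattern it wraps; MY_INIT and example_ctor are illustrative names, not
DPDK API:

    /* Hypothetical stand-in for the constructor pattern RTE_INIT wraps. */
    #define MY_INIT(func) \
        static void __attribute__((constructor, used)) func(void)

    MY_INIT(example_ctor)
    {
        /* runs once per process, before main() */
    }
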
struct lthread_sched *new_sched;
unsigned lcoreid = rte_lcore_id();
- LTHREAD_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
+ RTE_ASSERT(stack_size <= LTHREAD_MAX_STACK_SIZE);
if (stack_size == 0)
stack_size = LTHREAD_MAX_STACK_SIZE;
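
One thing to keep in mind with the RTE_ASSERT conversion: like assert(3),
RTE_ASSERT from rte_debug.h only checks when RTE_ENABLE_ASSERT is defined,
so release builds still rely on the stack_size == 0 fallback above. Roughly
(MY_ASSERT is an illustrative stand-in, not the exact DPDK macro):

    #ifdef RTE_ENABLE_ASSERT
    #define MY_ASSERT(exp) RTE_VERIFY(exp)  /* panics if exp is false */
    #else
    #define MY_ASSERT(exp) do {} while (0)  /* compiled out */
    #endif
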
/*
* Resume a suspended lthread
*/
-static inline void
-_lthread_resume(struct lthread *lt) __attribute__ ((always_inline));
-static inline void _lthread_resume(struct lthread *lt)
+static __rte_always_inline void
+_lthread_resume(struct lthread *lt)
{
struct lthread_sched *sched = THIS_SCHED;
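
Here __rte_always_inline is DPDK's portable spelling of the attribute,
defined in rte_common.h approximately as:

    #define __rte_always_inline inline __attribute__((always_inline))

Because the macro sits in the declaration-specifier position, the attribute
can annotate the definition directly, which is why the separate forward
declaration the old code carried can be dropped.
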
/*
 * Return the scheduler for this lcore
 *
*/
-struct lthread_sched *_lthread_sched_get(int lcore_id)
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id)
{
- if (lcore_id > LTHREAD_MAX_LCORES)
- return NULL;
- return schedcore[lcore_id];
+ struct lthread_sched *res = NULL;
+
+ if (lcore_id < LTHREAD_MAX_LCORES)
+ res = schedcore[lcore_id];
+
+ return res;
}
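
This rewrite fixes an off-by-one: schedcore has LTHREAD_MAX_LCORES entries,
so the old lcore_id > LTHREAD_MAX_LCORES test still allowed
lcore_id == LTHREAD_MAX_LCORES to read one element past the array. Callers
should expect NULL for any id without a scheduler; a hypothetical call site:

    /* Hypothetical caller: rte_exit() aborts with a message if this
     * lcore has no scheduler attached. */
    struct lthread_sched *sched = _lthread_sched_get(rte_lcore_id());
    if (sched == NULL)
        rte_exit(EXIT_FAILURE, "no scheduler on lcore %u\n", rte_lcore_id());
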
struct lthread *lt = THIS_LTHREAD;
struct lthread_sched *dest_sched;
- if (unlikely(lcoreid > LTHREAD_MAX_LCORES))
+ if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
return POSIX_ERRNO(EINVAL);
DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);
dest_sched = schedcore[lcoreid];
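
Same off-by-one class as in _lthread_sched_get: with >=, the check now
rejects lcoreid == LTHREAD_MAX_LCORES before schedcore is indexed, and the
failure surfaces through the function's errno-style return value. A
hypothetical call site (dest_lcore chosen by the application):

    int ret = lthread_set_affinity(dest_lcore);
    if (ret != 0)
        printf("affinity to lcore %u failed: %s\n", dest_lcore, strerror(ret));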