lib/librte_eal/common/eal_common_proc.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <inttypes.h>
#include <libgen.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_tailq.h>

#include "eal_private.h"
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"

static int mp_fd = -1;
static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;

struct action_entry {
        TAILQ_ENTRY(action_entry) next;
        char action_name[RTE_MP_MAX_NAME_LEN];
        rte_mp_t action;
};

/** Doubly-linked list of actions. */
TAILQ_HEAD(action_entry_list, action_entry);

static struct action_entry_list action_entry_list =
        TAILQ_HEAD_INITIALIZER(action_entry_list);

enum mp_type {
        MP_MSG, /* Share message with peers, will not block */
        MP_REQ, /* Request for information, will block for a reply */
        MP_REP, /* Response to previously-received request */
        MP_IGN, /* Response telling requester to ignore this response */
};

struct mp_msg_internal {
        int type;
        struct rte_mp_msg msg;
};

struct async_request_param {
        rte_mp_async_reply_t clb;
        struct rte_mp_reply user_reply;
        struct timespec end;
        int n_responses_processed;
};

struct pending_request {
        TAILQ_ENTRY(pending_request) next;
        enum {
                REQUEST_TYPE_SYNC,
                REQUEST_TYPE_ASYNC
        } type;
        char dst[PATH_MAX];
        struct rte_mp_msg *request;
        struct rte_mp_msg *reply;
        int reply_received;
        RTE_STD_C11
        union {
                struct {
                        struct async_request_param *param;
                } async;
                struct {
                        pthread_cond_t cond;
                } sync;
        };
};

TAILQ_HEAD(pending_request_list, pending_request);

static struct {
        struct pending_request_list requests;
        pthread_mutex_t lock;
        pthread_cond_t async_cond;
} pending_requests = {
        .requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .async_cond = PTHREAD_COND_INITIALIZER
        /**< used in async requests only */
};

/* forward declarations */
static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type);


/* Find a pending request by destination socket path and request (action)
 * name. The caller must hold pending_requests.lock.
 */
static struct pending_request *
find_sync_request(const char *dst, const char *act_name)
{
        struct pending_request *r;

        TAILQ_FOREACH(r, &pending_requests.requests, next) {
                if (!strcmp(r->dst, dst) &&
                    !strcmp(r->request->name, act_name))
                        break;
        }

        return r;
}

static void
create_socket_path(const char *name, char *buf, int len)
{
        const char *prefix = eal_mp_socket_path();

        if (strlen(name) > 0)
                snprintf(buf, len, "%s_%s", prefix, name);
        else
                snprintf(buf, len, "%s", prefix);
}

int
rte_eal_primary_proc_alive(const char *config_file_path)
{
        int config_fd;

        if (config_file_path)
                config_fd = open(config_file_path, O_RDONLY);
        else {
                const char *path;

                path = eal_runtime_config_path();
                config_fd = open(path, O_RDONLY);
        }
        if (config_fd < 0)
                return 0;

        int ret = lockf(config_fd, F_TEST, 0);
        close(config_fd);

        return !!ret;
}

static struct action_entry *
find_action_entry_by_name(const char *name)
{
        struct action_entry *entry;

        TAILQ_FOREACH(entry, &action_entry_list, next) {
                if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
                        break;
        }

        return entry;
}

static int
validate_action_name(const char *name)
{
        if (name == NULL) {
                RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
                RTE_LOG(ERR, EAL, "Length of action name is zero\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
                RTE_LOG(ERR, EAL, "Length of action name is too long\n");
                rte_errno = E2BIG;
                return -1;
        }
        return 0;
}

int __rte_experimental
rte_mp_action_register(const char *name, rte_mp_t action)
{
        struct action_entry *entry;

        if (validate_action_name(name))
                return -1;

        entry = malloc(sizeof(struct action_entry));
        if (entry == NULL) {
                rte_errno = ENOMEM;
                return -1;
        }
        strcpy(entry->action_name, name);
        entry->action = action;

        pthread_mutex_lock(&mp_mutex_action);
        if (find_action_entry_by_name(name) != NULL) {
                pthread_mutex_unlock(&mp_mutex_action);
                rte_errno = EEXIST;
                free(entry);
                return -1;
        }
        TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
        pthread_mutex_unlock(&mp_mutex_action);
        return 0;
}
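
/*
 * Illustrative usage (not part of the original file): a component might
 * register a handler for its own message type as sketched below. The
 * callback and message names are hypothetical; the signature follows the
 * rte_mp_t typedef, and a reply must reuse the request's name so that the
 * requester can match it to its pending request.
 *
 *   static int
 *   handle_my_request(const struct rte_mp_msg *msg, const void *peer)
 *   {
 *           struct rte_mp_msg reply;
 *
 *           memset(&reply, 0, sizeof(reply));
 *           snprintf(reply.name, sizeof(reply.name), "%s", msg->name);
 *           return rte_mp_reply(&reply, peer);
 *   }
 *
 *   rte_mp_action_register("my_request", handle_my_request);
 */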

void __rte_experimental
rte_mp_action_unregister(const char *name)
{
        struct action_entry *entry;

        if (validate_action_name(name))
                return;

        pthread_mutex_lock(&mp_mutex_action);
        entry = find_action_entry_by_name(name);
        if (entry == NULL) {
                pthread_mutex_unlock(&mp_mutex_action);
                return;
        }
        TAILQ_REMOVE(&action_entry_list, entry, next);
        pthread_mutex_unlock(&mp_mutex_action);
        free(entry);
}

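/* Receive one datagram from the mp socket into *m, filling in the sender's
 * address in *s. File descriptors passed as SCM_RIGHTS ancillary data, if
 * any, are copied into m->msg.fds.
 */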
static int
read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
        int msglen;
        struct iovec iov;
        struct msghdr msgh;
        char control[CMSG_SPACE(sizeof(m->msg.fds))];
        struct cmsghdr *cmsg;
        int buflen = sizeof(*m) - sizeof(m->msg.fds);

        memset(&msgh, 0, sizeof(msgh));
        iov.iov_base = m;
        iov.iov_len  = buflen;

        msgh.msg_name = s;
        msgh.msg_namelen = sizeof(*s);
        msgh.msg_iov = &iov;
        msgh.msg_iovlen = 1;
        msgh.msg_control = control;
        msgh.msg_controllen = sizeof(control);

        msglen = recvmsg(mp_fd, &msgh, 0);
        if (msglen < 0) {
                RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
                return -1;
        }

        if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                RTE_LOG(ERR, EAL, "truncated msg\n");
                return -1;
        }

        /* read auxiliary FDs if any */
        for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
                cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
                if ((cmsg->cmsg_level == SOL_SOCKET) &&
                        (cmsg->cmsg_type == SCM_RIGHTS)) {
                        memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));
                        break;
                }
        }

        return 0;
}

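/* Dispatch one received message: replies (MP_REP/MP_IGN) are matched to a
 * pending request and its waiter is woken up; anything else is handed to
 * the action callback registered under the message name.
 */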
static void
process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
        struct pending_request *sync_req;
        struct action_entry *entry;
        struct rte_mp_msg *msg = &m->msg;
        rte_mp_t action = NULL;

        RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);

        if (m->type == MP_REP || m->type == MP_IGN) {
                pthread_mutex_lock(&pending_requests.lock);
                sync_req = find_sync_request(s->sun_path, msg->name);
                if (sync_req) {
                        memcpy(sync_req->reply, msg, sizeof(*msg));
                        /* -1 indicates that we've been asked to ignore */
                        sync_req->reply_received = m->type == MP_REP ? 1 : -1;

                        if (sync_req->type == REQUEST_TYPE_SYNC)
                                pthread_cond_signal(&sync_req->sync.cond);
                        else if (sync_req->type == REQUEST_TYPE_ASYNC)
                                pthread_cond_signal(
                                        &pending_requests.async_cond);
                } else
                        RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
                pthread_mutex_unlock(&pending_requests.lock);
                return;
        }

        pthread_mutex_lock(&mp_mutex_action);
        entry = find_action_entry_by_name(msg->name);
        if (entry != NULL)
                action = entry->action;
        pthread_mutex_unlock(&mp_mutex_action);

        if (!action) {
                if (m->type == MP_REQ && !internal_config.init_complete) {
                        /* if this is a request, and init is not yet complete,
                         * and callback wasn't registered, we should tell the
                         * requester to ignore our existence because we're not
                         * yet ready to process this request.
                         */
                        struct rte_mp_msg dummy;

                        memset(&dummy, 0, sizeof(dummy));
                        /* the reply must carry the request's name, otherwise
                         * the requester cannot match it to a pending request
                         */
                        snprintf(dummy.name, sizeof(dummy.name), "%s",
                                msg->name);
                        mp_send(&dummy, s->sun_path, MP_IGN);
                } else {
                        RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
                                msg->name);
                }
        } else if (action(msg, s->sun_path) < 0) {
                RTE_LOG(ERR, EAL, "Failed to handle message: %s\n", msg->name);
        }
}

static void *
mp_handle(void *arg __rte_unused)
{
        struct mp_msg_internal msg;
        struct sockaddr_un sa;

        while (1) {
                if (read_msg(&msg, &sa) == 0)
                        process_msg(&msg, &sa);
        }

        return NULL;
}

static int
timespec_cmp(const struct timespec *a, const struct timespec *b)
{
        if (a->tv_sec < b->tv_sec)
                return -1;
        if (a->tv_sec > b->tv_sec)
                return 1;
        if (a->tv_nsec < b->tv_nsec)
                return -1;
        if (a->tv_nsec > b->tv_nsec)
                return 1;
        return 0;
}

enum async_action {
        ACTION_NONE, /**< don't do anything */
        ACTION_FREE, /**< free the action entry, but don't trigger callback */
        ACTION_TRIGGER /**< trigger callback, then free action entry */
};

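/* Examine one async request: decide, based on timeout and reply state,
 * whether to keep it pending (ACTION_NONE), drop it (ACTION_FREE), or
 * drop it and invoke its completion callback (ACTION_TRIGGER).
 */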
static enum async_action
process_async_request(struct pending_request *sr, const struct timespec *now)
{
        struct async_request_param *param;
        struct rte_mp_reply *reply;
        bool timeout, received, last_msg;

        param = sr->async.param;
        reply = &param->user_reply;

        /* did we time out? */
        timeout = timespec_cmp(&param->end, now) <= 0;

        /* did we receive a response? */
        received = sr->reply_received != 0;

        /* if we didn't time out, and we didn't receive a response, ignore */
        if (!timeout && !received)
                return ACTION_NONE;

        /* if we received a response, adjust relevant data and copy message. */
        if (sr->reply_received == 1 && sr->reply) {
                struct rte_mp_msg *msg, *user_msgs, *tmp;

                msg = sr->reply;
                user_msgs = reply->msgs;

                tmp = realloc(user_msgs, sizeof(*msg) *
                                (reply->nb_received + 1));
                if (!tmp) {
                        RTE_LOG(ERR, EAL, "Failed to alloc reply for request %s:%s\n",
                                sr->dst, sr->request->name);
                        /* this entry is going to be removed and its message
                         * dropped, but we don't want to leak memory, so
                         * continue.
                         */
                } else {
                        user_msgs = tmp;
                        reply->msgs = user_msgs;
                        memcpy(&user_msgs[reply->nb_received],
                                        msg, sizeof(*msg));
                        reply->nb_received++;
                }

                /* mark this request as processed */
                param->n_responses_processed++;
        } else if (sr->reply_received == -1) {
                /* we were asked to ignore this process */
                reply->nb_sent--;
        }
        free(sr->reply);

        last_msg = param->n_responses_processed == reply->nb_sent;

        return last_msg ? ACTION_TRIGGER : ACTION_FREE;
}

static void
trigger_async_action(struct pending_request *sr)
{
        struct async_request_param *param;
        struct rte_mp_reply *reply;

        param = sr->async.param;
        reply = &param->user_reply;

        param->clb(sr->request, reply);

        /* clean up */
        free(sr->async.param->user_reply.msgs);
        free(sr->async.param);
        free(sr->request);
}

static struct pending_request *
check_trigger(struct timespec *ts)
{
        struct pending_request *next, *cur, *trigger = NULL;

        TAILQ_FOREACH_SAFE(cur, &pending_requests.requests, next, next) {
                enum async_action action;
                if (cur->type != REQUEST_TYPE_ASYNC)
                        continue;

                action = process_async_request(cur, ts);
                if (action == ACTION_FREE) {
                        TAILQ_REMOVE(&pending_requests.requests, cur, next);
                        free(cur);
                } else if (action == ACTION_TRIGGER) {
                        TAILQ_REMOVE(&pending_requests.requests, cur, next);
                        trigger = cur;
                        break;
                }
        }
        return trigger;
}

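/* Wait until either an async reply arrives or the earliest pending timeout
 * expires. Called, and returns, with pending_requests.lock held.
 */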
static void
wait_for_async_messages(void)
{
        struct pending_request *sr;
        struct timespec timeout;
        bool timedwait = false;
        bool nowait = false;
        int ret;

        /* scan through the list and see if there are any timeouts that
         * are earlier than our current timeout.
         */
        TAILQ_FOREACH(sr, &pending_requests.requests, next) {
                if (sr->type != REQUEST_TYPE_ASYNC)
                        continue;
                if (!timedwait || timespec_cmp(&sr->async.param->end,
                                &timeout) < 0) {
                        memcpy(&timeout, &sr->async.param->end,
                                sizeof(timeout));
                        timedwait = true;
                }

                /* sometimes, we don't even wait */
                if (sr->reply_received) {
                        nowait = true;
                        break;
                }
        }

        if (nowait)
                return;

        do {
                ret = timedwait ?
                        pthread_cond_timedwait(
                                &pending_requests.async_cond,
                                &pending_requests.lock,
                                &timeout) :
                        pthread_cond_wait(
                                &pending_requests.async_cond,
                                &pending_requests.lock);
        } while (ret != 0 && ret != ETIMEDOUT);

        /* we've been woken up or timed out */
}

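/* Body of the background thread that services asynchronous requests: it
 * sleeps until woken up or timed out, then triggers completion callbacks
 * outside of the queue lock.
 */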
static void *
async_reply_handle(void *arg __rte_unused)
{
        struct timeval now;
        struct timespec ts_now;
        while (1) {
                struct pending_request *trigger = NULL;

                pthread_mutex_lock(&pending_requests.lock);

                /* we exit this function holding the lock */
                wait_for_async_messages();

                if (gettimeofday(&now, NULL) < 0) {
                        RTE_LOG(ERR, EAL, "Cannot get current time\n");
                        /* don't exit the thread holding the queue lock */
                        pthread_mutex_unlock(&pending_requests.lock);
                        break;
                }
                ts_now.tv_nsec = now.tv_usec * 1000;
                ts_now.tv_sec = now.tv_sec;

                do {
                        trigger = check_trigger(&ts_now);
                        /* unlock request list */
                        pthread_mutex_unlock(&pending_requests.lock);

                        if (trigger) {
                                trigger_async_action(trigger);
                                free(trigger);

                                /* we've triggered a callback, but there may be
                                 * more, so lock the list and check again.
                                 */
                                pthread_mutex_lock(&pending_requests.lock);
                        }
                } while (trigger);
        }

        RTE_LOG(ERR, EAL, "ERROR: asynchronous requests disabled\n");

        return NULL;
}

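/* Create and bind the datagram socket for this process. The primary process
 * binds the base socket path; each secondary appends a unique suffix built
 * from its PID and the TSC.
 */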
static int
open_socket_fd(void)
{
        char peer_name[PATH_MAX] = {0};
        struct sockaddr_un un;

        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                snprintf(peer_name, sizeof(peer_name),
                                "%d_%"PRIx64, getpid(), rte_rdtsc());

        mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
        if (mp_fd < 0) {
                RTE_LOG(ERR, EAL, "failed to create unix socket\n");
                return -1;
        }

        memset(&un, 0, sizeof(un));
        un.sun_family = AF_UNIX;

        create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));

        unlink(un.sun_path); /* May still exist since last run */

        if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
                RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
                        un.sun_path, strerror(errno));
                close(mp_fd);
                return -1;
        }

        RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
        return mp_fd;
}

static int
unlink_sockets(const char *filter)
{
        int dir_fd;
        DIR *mp_dir;
        struct dirent *ent;

        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
                return -1;
        }
        dir_fd = dirfd(mp_dir);

        while ((ent = readdir(mp_dir))) {
                if (fnmatch(filter, ent->d_name, 0) == 0)
                        unlinkat(dir_fd, ent->d_name, 0);
        }

        closedir(mp_dir);
        return 0;
}

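/* Set up the multi-process channel: build the socket filter and directory
 * paths, take the directory lock, clean up stale secondary sockets (primary
 * only), open our own socket and spawn the receive and async-reply threads.
 */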
int
rte_mp_channel_init(void)
{
        char thread_name[RTE_MAX_THREAD_NAME_LEN];
        char path[PATH_MAX];
        int dir_fd;
        pthread_t mp_handle_tid, async_reply_handle_tid;

        /* create filter path */
        create_socket_path("*", path, sizeof(path));
        snprintf(mp_filter, sizeof(mp_filter), "%s", basename(path));

        /* path may have been modified, so recreate it */
        create_socket_path("*", path, sizeof(path));
        snprintf(mp_dir_path, sizeof(mp_dir_path), "%s", dirname(path));

        /* lock the directory */
        dir_fd = open(mp_dir_path, O_RDONLY);
        if (dir_fd < 0) {
                RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
                        mp_dir_path, strerror(errno));
                return -1;
        }

        if (flock(dir_fd, LOCK_EX)) {
                RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
                        mp_dir_path, strerror(errno));
                close(dir_fd);
                return -1;
        }

        if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
                        unlink_sockets(mp_filter)) {
                RTE_LOG(ERR, EAL, "failed to unlink mp sockets\n");
                close(dir_fd);
                return -1;
        }

        if (open_socket_fd() < 0) {
                close(dir_fd);
                return -1;
        }

        if (pthread_create(&mp_handle_tid, NULL, mp_handle, NULL) < 0) {
                RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
                        strerror(errno));
                close(mp_fd);
                close(dir_fd);
                mp_fd = -1;
                return -1;
        }

        if (pthread_create(&async_reply_handle_tid, NULL,
                        async_reply_handle, NULL) < 0) {
                RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
                        strerror(errno));
                close(mp_fd);
                close(dir_fd);
                mp_fd = -1;
                return -1;
        }

        /* try best to set thread name */
        snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "rte_mp_handle");
        rte_thread_setname(mp_handle_tid, thread_name);

        /* try best to set thread name */
        snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "rte_mp_async_handle");
        rte_thread_setname(async_reply_handle_tid, thread_name);

        /* unlock the directory */
        flock(dir_fd, LOCK_UN);
        close(dir_fd);

        return 0;
}

/*
 * Return -1 if we failed to send the message due to the local side;
 * return 0 if we failed due to the remote side;
 * return 1 if the message was sent successfully.
 */
static int
send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
{
        int snd;
        struct iovec iov;
        struct msghdr msgh;
        struct cmsghdr *cmsg;
        struct sockaddr_un dst;
        struct mp_msg_internal m;
        int fd_size = msg->num_fds * sizeof(int);
        char control[CMSG_SPACE(fd_size)];

        m.type = type;
        memcpy(&m.msg, msg, sizeof(*msg));

        memset(&dst, 0, sizeof(dst));
        dst.sun_family = AF_UNIX;
        snprintf(dst.sun_path, sizeof(dst.sun_path), "%s", dst_path);

        memset(&msgh, 0, sizeof(msgh));
        memset(control, 0, sizeof(control));

        iov.iov_base = &m;
        iov.iov_len = sizeof(m) - sizeof(msg->fds);

        msgh.msg_name = &dst;
        msgh.msg_namelen = sizeof(dst);
        msgh.msg_iov = &iov;
        msgh.msg_iovlen = 1;
        msgh.msg_control = control;
        msgh.msg_controllen = sizeof(control);

        cmsg = CMSG_FIRSTHDR(&msgh);
        cmsg->cmsg_len = CMSG_LEN(fd_size);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);

        do {
                snd = sendmsg(mp_fd, &msgh, 0);
        } while (snd < 0 && errno == EINTR);

        if (snd < 0) {
                rte_errno = errno;
                /* check if failure is caused by the peer process exiting */
                if (errno == ECONNREFUSED &&
                                rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        unlink(dst_path);
                        return 0;
                }
                if (errno == ENOBUFS) {
                        RTE_LOG(ERR, EAL, "Peer cannot receive message %s\n",
                                dst_path);
                        return 0;
                }
                RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
                        dst_path, strerror(errno));
                return -1;
        }

        return 1;
}

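/* Send a message to one peer. If peer is NULL, a secondary process sends to
 * the primary, while the primary broadcasts to every secondary socket in
 * mp_dir_path, holding the directory lock so no new secondary can appear
 * mid-send.
 */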
static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type)
{
        int dir_fd, ret = 0;
        DIR *mp_dir;
        struct dirent *ent;

        if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
                peer = eal_mp_socket_path();

        if (peer) {
                if (send_msg(peer, msg, type) < 0)
                        return -1;
                else
                        return 0;
        }

        /* broadcast to all secondary processes */
        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
                                mp_dir_path);
                rte_errno = errno;
                return -1;
        }

        dir_fd = dirfd(mp_dir);
        /* lock the directory to prevent processes spinning up while we send */
        if (flock(dir_fd, LOCK_EX)) {
                RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
                        mp_dir_path);
                rte_errno = errno;
                closedir(mp_dir);
                return -1;
        }

        while ((ent = readdir(mp_dir))) {
                char path[PATH_MAX];

                if (fnmatch(mp_filter, ent->d_name, 0) != 0)
                        continue;

                snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
                         ent->d_name);
                if (send_msg(path, msg, type) < 0)
                        ret = -1;
        }
        /* unlock the dir */
        flock(dir_fd, LOCK_UN);

        /* dir_fd automatically closed on closedir */
        closedir(mp_dir);
        return ret;
}

static bool
check_input(const struct rte_mp_msg *msg)
{
        if (msg == NULL) {
                RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
                rte_errno = EINVAL;
                return false;
        }

        if (validate_action_name(msg->name))
                return false;

        if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
                RTE_LOG(ERR, EAL, "Message data is too long\n");
                rte_errno = E2BIG;
                return false;
        }

        if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
                RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
                        RTE_MP_MAX_FD_NUM);
                rte_errno = E2BIG;
                return false;
        }

        return true;
}

int __rte_experimental
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
        if (!check_input(msg))
                return -1;

        RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
        return mp_send(msg, NULL, MP_MSG);
}
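
/*
 * Illustrative usage (not part of the original file): fire-and-forget
 * broadcast of a parameter-less message. The message name is hypothetical;
 * field names follow struct rte_mp_msg.
 *
 *   struct rte_mp_msg msg;
 *
 *   memset(&msg, 0, sizeof(msg));
 *   snprintf(msg.name, sizeof(msg.name), "my_msg");
 *   if (rte_mp_sendmsg(&msg) < 0)
 *           RTE_LOG(ERR, EAL, "failed to send my_msg\n");
 */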

static int
mp_request_async(const char *dst, struct rte_mp_msg *req,
                struct async_request_param *param)
{
        struct rte_mp_msg *reply_msg;
        struct pending_request *sync_req, *exist;
        int ret;

        sync_req = malloc(sizeof(*sync_req));
        reply_msg = malloc(sizeof(*reply_msg));
        if (sync_req == NULL || reply_msg == NULL) {
                RTE_LOG(ERR, EAL, "Could not allocate space for sync request\n");
                rte_errno = ENOMEM;
                ret = -1;
                goto fail;
        }

        memset(sync_req, 0, sizeof(*sync_req));
        memset(reply_msg, 0, sizeof(*reply_msg));

        sync_req->type = REQUEST_TYPE_ASYNC;
        strcpy(sync_req->dst, dst);
        sync_req->request = req;
        sync_req->reply = reply_msg;
        sync_req->async.param = param;

        /* queue already locked by caller */

        exist = find_sync_request(dst, req->name);
        if (exist) {
                RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
                rte_errno = EEXIST;
                ret = -1;
                goto fail;
        }

        ret = send_msg(dst, req, MP_REQ);
        if (ret < 0) {
                RTE_LOG(ERR, EAL, "Failed to send request %s:%s\n",
                        dst, req->name);
                ret = -1;
                goto fail;
        } else if (ret == 0)
                goto fail;
        TAILQ_INSERT_TAIL(&pending_requests.requests, sync_req, next);

        param->user_reply.nb_sent++;

        return 0;
fail:
        free(sync_req);
        free(reply_msg);
        return ret;
}

static int
mp_request_sync(const char *dst, struct rte_mp_msg *req,
               struct rte_mp_reply *reply, const struct timespec *ts)
{
        int ret;
        struct rte_mp_msg msg, *tmp;
        struct pending_request sync_req, *exist;

        sync_req.type = REQUEST_TYPE_SYNC;
        sync_req.reply_received = 0;
        strcpy(sync_req.dst, dst);
        sync_req.request = req;
        sync_req.reply = &msg;
        pthread_cond_init(&sync_req.sync.cond, NULL);

        pthread_mutex_lock(&pending_requests.lock);
        exist = find_sync_request(dst, req->name);
        if (exist) {
                RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
                rte_errno = EEXIST;
                pthread_mutex_unlock(&pending_requests.lock);
                return -1;
        }

        ret = send_msg(dst, req, MP_REQ);
        if (ret < 0) {
                RTE_LOG(ERR, EAL, "Failed to send request %s:%s\n",
                        dst, req->name);
                /* don't return with the queue lock held */
                pthread_mutex_unlock(&pending_requests.lock);
                return -1;
        } else if (ret == 0) {
                pthread_mutex_unlock(&pending_requests.lock);
                return 0;
        }

        TAILQ_INSERT_TAIL(&pending_requests.requests, &sync_req, next);

        reply->nb_sent++;

        do {
                ret = pthread_cond_timedwait(&sync_req.sync.cond,
                                &pending_requests.lock, ts);
        } while (ret != 0 && ret != ETIMEDOUT);

        /* We got the lock now */
        TAILQ_REMOVE(&pending_requests.requests, &sync_req, next);
        pthread_mutex_unlock(&pending_requests.lock);

        if (sync_req.reply_received == 0) {
                RTE_LOG(ERR, EAL, "Failed to recv reply for request %s:%s\n",
                        dst, req->name);
                rte_errno = ETIMEDOUT;
                return -1;
        }
        if (sync_req.reply_received == -1) {
                RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
                /* not receiving this message is not an error, so decrement
                 * number of sent messages
                 */
                reply->nb_sent--;
                return 0;
        }

        tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
        if (!tmp) {
                RTE_LOG(ERR, EAL, "Failed to alloc reply for request %s:%s\n",
                        dst, req->name);
                rte_errno = ENOMEM;
                return -1;
        }
        memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
        reply->msgs = tmp;
        reply->nb_received++;
        return 0;
}

int __rte_experimental
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
                const struct timespec *ts)
{
        int dir_fd, ret = 0;
        DIR *mp_dir;
        struct dirent *ent;
        struct timeval now;
        struct timespec end;

        RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

        if (check_input(req) == false)
                return -1;
        if (gettimeofday(&now, NULL) < 0) {
                RTE_LOG(ERR, EAL, "Failed to get current time\n");
                rte_errno = errno;
                return -1;
        }

        /* convert the relative timeout into an absolute deadline */
        end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
        end.tv_sec = now.tv_sec + ts->tv_sec +
                        (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;

        reply->nb_sent = 0;
        reply->nb_received = 0;
        reply->msgs = NULL;

        /* for secondary process, send request to the primary process only */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                return mp_request_sync(eal_mp_socket_path(), req, reply, &end);

        /* for primary process, broadcast request and collect replies 1 by 1 */
        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
                rte_errno = errno;
                return -1;
        }

        dir_fd = dirfd(mp_dir);
        /* lock the directory to prevent processes spinning up while we send */
        if (flock(dir_fd, LOCK_EX)) {
                RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
                        mp_dir_path);
                closedir(mp_dir);
                rte_errno = errno;
                return -1;
        }

        while ((ent = readdir(mp_dir))) {
                char path[PATH_MAX];

                if (fnmatch(mp_filter, ent->d_name, 0) != 0)
                        continue;

                snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
                         ent->d_name);

                if (mp_request_sync(path, req, reply, &end))
                        ret = -1;
        }
        /* unlock the directory */
        flock(dir_fd, LOCK_UN);

        /* dir_fd automatically closed on closedir */
        closedir(mp_dir);
        return ret;
}
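
/*
 * Illustrative usage (not part of the original file): synchronous request
 * with a 5-second timeout. The message name is hypothetical; on success the
 * caller owns, and must free, reply.msgs.
 *
 *   struct rte_mp_msg req;
 *   struct rte_mp_reply reply;
 *   struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
 *
 *   memset(&req, 0, sizeof(req));
 *   snprintf(req.name, sizeof(req.name), "my_request");
 *   memset(&reply, 0, sizeof(reply));
 *   if (rte_mp_request_sync(&req, &reply, &ts) == 0)
 *           free(reply.msgs);
 */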

int __rte_experimental
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
                rte_mp_async_reply_t clb)
{
        struct rte_mp_msg *copy;
        struct pending_request *dummy;
        struct async_request_param *param;
        struct rte_mp_reply *reply;
        int dir_fd, ret = 0;
        DIR *mp_dir;
        struct dirent *ent;
        struct timeval now;
        struct timespec *end;
        bool dummy_used = false;

        RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

        if (check_input(req) == false)
                return -1;
        if (gettimeofday(&now, NULL) < 0) {
                RTE_LOG(ERR, EAL, "Failed to get current time\n");
                rte_errno = errno;
                return -1;
        }
        copy = malloc(sizeof(*copy));
        dummy = malloc(sizeof(*dummy));
        param = malloc(sizeof(*param));
        if (copy == NULL || dummy == NULL || param == NULL) {
                RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
                rte_errno = ENOMEM;
                goto fail;
        }

        memset(copy, 0, sizeof(*copy));
        memset(dummy, 0, sizeof(*dummy));
        memset(param, 0, sizeof(*param));

        /* copy message */
        memcpy(copy, req, sizeof(*copy));

        param->n_responses_processed = 0;
        param->clb = clb;
        end = &param->end;
        reply = &param->user_reply;

        /* convert the relative timeout into an absolute deadline */
        end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
        end->tv_sec = now.tv_sec + ts->tv_sec +
                        (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
        reply->nb_sent = 0;
        reply->nb_received = 0;
        reply->msgs = NULL;

        /* we have to lock the request queue here, as we will be adding a bunch
         * of requests to the queue at once, and some of the replies may arrive
         * before we add all of the requests to the queue.
         */
        pthread_mutex_lock(&pending_requests.lock);

        /* we have to ensure that callback gets triggered even if we don't send
         * anything, therefore earlier we have allocated a dummy request. fill
         * it, and put it on the queue if we don't send any requests.
         */
        dummy->type = REQUEST_TYPE_ASYNC;
        dummy->request = copy;
        dummy->reply = NULL;
        dummy->async.param = param;
        dummy->reply_received = 1; /* short-circuit the timeout */

        /* for secondary process, send request to the primary process only */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                ret = mp_request_async(eal_mp_socket_path(), copy, param);

                /* if we didn't send anything, put dummy request on the queue */
                if (ret == 0 && reply->nb_sent == 0) {
                        TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
                                        next);
                        dummy_used = true;
                }

                pthread_mutex_unlock(&pending_requests.lock);

                /* if we couldn't send anything, clean up */
                if (ret != 0)
                        goto fail;
                return 0;
        }

        /* for primary process, broadcast request */
        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
                rte_errno = errno;
                goto unlock_fail;
        }
        dir_fd = dirfd(mp_dir);

        /* lock the directory to prevent processes spinning up while we send */
        if (flock(dir_fd, LOCK_EX)) {
                RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
                        mp_dir_path);
                rte_errno = errno;
                goto closedir_fail;
        }

        while ((ent = readdir(mp_dir))) {
                char path[PATH_MAX];

                if (fnmatch(mp_filter, ent->d_name, 0) != 0)
                        continue;

                snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
                         ent->d_name);

                if (mp_request_async(path, copy, param))
                        ret = -1;
        }
        /* if we didn't send anything, put dummy request on the queue */
        if (ret == 0 && reply->nb_sent == 0) {
                TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
                dummy_used = true;
        }

        /* trigger async request thread wake up */
        pthread_cond_signal(&pending_requests.async_cond);

        /* finally, unlock the queue */
        pthread_mutex_unlock(&pending_requests.lock);

        /* unlock the directory */
        flock(dir_fd, LOCK_UN);

        /* dir_fd automatically closed on closedir */
        closedir(mp_dir);

        /* if dummy was unused, free it */
        if (!dummy_used)
                free(dummy);

        return ret;
closedir_fail:
        closedir(mp_dir);
unlock_fail:
        pthread_mutex_unlock(&pending_requests.lock);
fail:
        free(dummy);
        free(param);
        free(copy);
        return -1;
}
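
/*
 * Illustrative usage (not part of the original file): asynchronous request
 * whose replies are delivered to a callback matching the rte_mp_async_reply_t
 * typedef. All names below are hypothetical.
 *
 *   static int
 *   my_reply_cb(const struct rte_mp_msg *request,
 *                   const struct rte_mp_reply *reply)
 *   {
 *           RTE_LOG(DEBUG, EAL, "%s: got %d of %d replies\n",
 *                   request->name, reply->nb_received, reply->nb_sent);
 *           return 0;
 *   }
 *
 *   struct rte_mp_msg req;
 *   struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
 *
 *   memset(&req, 0, sizeof(req));
 *   snprintf(req.name, sizeof(req.name), "my_request");
 *   if (rte_mp_request_async(&req, &ts, my_reply_cb) < 0)
 *           RTE_LOG(ERR, EAL, "async request failed\n");
 */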

int __rte_experimental
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
        RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);

        if (check_input(msg) == false)
                return -1;

        if (peer == NULL) {
                RTE_LOG(ERR, EAL, "peer is not specified\n");
                rte_errno = EINVAL;
                return -1;
        }

        return mp_send(msg, peer, MP_REP);
}