eal: close multi-process socket during cleanup
[dpdk.git] / lib/librte_eal/common/eal_common_proc.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016-2018 Intel Corporation
3  */
4
5 #include <dirent.h>
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <fnmatch.h>
9 #include <inttypes.h>
10 #include <libgen.h>
11 #include <limits.h>
12 #include <pthread.h>
13 #include <stdio.h>
14 #include <stdlib.h>
15 #include <string.h>
16 #include <sys/file.h>
17 #include <sys/time.h>
18 #include <sys/types.h>
19 #include <sys/socket.h>
20 #include <sys/un.h>
21 #include <unistd.h>
22
23 #include <rte_alarm.h>
24 #include <rte_common.h>
25 #include <rte_cycles.h>
26 #include <rte_eal.h>
27 #include <rte_errno.h>
28 #include <rte_lcore.h>
29 #include <rte_log.h>
30 #include <rte_tailq.h>
31
32 #include "eal_private.h"
33 #include "eal_filesystem.h"
34 #include "eal_internal_cfg.h"
35
36 static int mp_fd = -1;
37 static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
38 static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
39 static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
40 static char peer_name[PATH_MAX];
41
42 struct action_entry {
43         TAILQ_ENTRY(action_entry) next;
44         char action_name[RTE_MP_MAX_NAME_LEN];
45         rte_mp_t action;
46 };
47
48 /** Double linked list of actions. */
49 TAILQ_HEAD(action_entry_list, action_entry);
50
51 static struct action_entry_list action_entry_list =
52         TAILQ_HEAD_INITIALIZER(action_entry_list);
53
54 enum mp_type {
55         MP_MSG, /* Share a message with peers; will not block */
56         MP_REQ, /* Request for information; will block for a reply */
57         MP_REP, /* Response to previously-received request */
58         MP_IGN, /* Response telling requester to ignore this response */
59 };
60
61 struct mp_msg_internal {
62         int type;
63         struct rte_mp_msg msg;
64 };
65
66 struct async_request_param {
67         rte_mp_async_reply_t clb;
68         struct rte_mp_reply user_reply;
69         struct timespec end;
70         int n_responses_processed;
71 };
72
73 struct pending_request {
74         TAILQ_ENTRY(pending_request) next;
75         enum {
76                 REQUEST_TYPE_SYNC,
77                 REQUEST_TYPE_ASYNC
78         } type;
79         char dst[PATH_MAX];
80         struct rte_mp_msg *request;
81         struct rte_mp_msg *reply;
82         int reply_received;
83         RTE_STD_C11
84         union {
85                 struct {
86                         struct async_request_param *param;
87                 } async;
88                 struct {
89                         pthread_cond_t cond;
90                 } sync;
91         };
92 };
93
94 TAILQ_HEAD(pending_request_list, pending_request);
95
96 static struct {
97         struct pending_request_list requests;
98         pthread_mutex_t lock;
99 } pending_requests = {
100         .requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
101         .lock = PTHREAD_MUTEX_INITIALIZER,
102         /**< used in async requests only */
103 };
104
105 /* forward declarations */
106 static int
107 mp_send(struct rte_mp_msg *msg, const char *peer, int type);
108
109 /* for use with alarm callback */
110 static void
111 async_reply_handle(void *arg);
112
113 /* for use with process_msg */
114 static struct pending_request *
115 async_reply_handle_thread_unsafe(void *arg);
116
117 static void
118 trigger_async_action(struct pending_request *req);
119
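/* Find a queued request that matches both the destination socket path and the
 * request name; returns NULL if no such request is pending. The caller must
 * hold pending_requests.lock.
 */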
120 static struct pending_request *
121 find_pending_request(const char *dst, const char *act_name)
122 {
123         struct pending_request *r;
124
125         TAILQ_FOREACH(r, &pending_requests.requests, next) {
126                 if (!strcmp(r->dst, dst) &&
127                     !strcmp(r->request->name, act_name))
128                         break;
129         }
130
131         return r;
132 }
133
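/* Build the full socket path: the base mp socket path alone when name is
 * empty (primary process), or "<base path>_<name>" for a secondary process.
 */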
134 static void
135 create_socket_path(const char *name, char *buf, int len)
136 {
137         const char *prefix = eal_mp_socket_path();
138
139         if (strlen(name) > 0)
140                 snprintf(buf, len, "%s_%s", prefix, name);
141         else
142                 strlcpy(buf, prefix, len);
143 }
144
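/* Test whether a primary process is alive by probing the lock it holds on the
 * runtime config file; returns 1 if the file is locked (primary alive),
 * 0 otherwise.
 */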
145 int
146 rte_eal_primary_proc_alive(const char *config_file_path)
147 {
148         int config_fd;
149
150         if (config_file_path)
151                 config_fd = open(config_file_path, O_RDONLY);
152         else {
153                 const char *path;
154
155                 path = eal_runtime_config_path();
156                 config_fd = open(path, O_RDONLY);
157         }
158         if (config_fd < 0)
159                 return 0;
160
161         int ret = lockf(config_fd, F_TEST, 0);
162         close(config_fd);
163
164         return !!ret;
165 }
166
167 static struct action_entry *
168 find_action_entry_by_name(const char *name)
169 {
170         struct action_entry *entry;
171
172         TAILQ_FOREACH(entry, &action_entry_list, next) {
173                 if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
174                         break;
175         }
176
177         return entry;
178 }
179
180 static int
181 validate_action_name(const char *name)
182 {
183         if (name == NULL) {
184                 RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
185                 rte_errno = EINVAL;
186                 return -1;
187         }
188         if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
189                 RTE_LOG(ERR, EAL, "Length of action name is zero\n");
190                 rte_errno = EINVAL;
191                 return -1;
192         }
193         if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
194                 rte_errno = E2BIG;
195                 return -1;
196         }
197         return 0;
198 }
199
200 int __rte_experimental
201 rte_mp_action_register(const char *name, rte_mp_t action)
202 {
203         struct action_entry *entry;
204
205         if (validate_action_name(name))
206                 return -1;
207
208         entry = malloc(sizeof(struct action_entry));
209         if (entry == NULL) {
210                 rte_errno = ENOMEM;
211                 return -1;
212         }
213         strlcpy(entry->action_name, name, sizeof(entry->action_name));
214         entry->action = action;
215
216         pthread_mutex_lock(&mp_mutex_action);
217         if (find_action_entry_by_name(name) != NULL) {
218                 pthread_mutex_unlock(&mp_mutex_action);
219                 rte_errno = EEXIST;
220                 free(entry);
221                 return -1;
222         }
223         TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
224         pthread_mutex_unlock(&mp_mutex_action);
225         return 0;
226 }
227
228 void __rte_experimental
229 rte_mp_action_unregister(const char *name)
230 {
231         struct action_entry *entry;
232
233         if (validate_action_name(name))
234                 return;
235
236         pthread_mutex_lock(&mp_mutex_action);
237         entry = find_action_entry_by_name(name);
238         if (entry == NULL) {
239                 pthread_mutex_unlock(&mp_mutex_action);
240                 return;
241         }
242         TAILQ_REMOVE(&action_entry_list, entry, next);
243         pthread_mutex_unlock(&mp_mutex_action);
244         free(entry);
245 }
246
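/* Receive a single message from the mp socket into *m, copying any file
 * descriptors passed as SCM_RIGHTS ancillary data into m->msg.fds. Returns 0
 * on success, -1 on receive error or truncation.
 */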
247 static int
248 read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
249 {
250         int msglen;
251         struct iovec iov;
252         struct msghdr msgh;
253         char control[CMSG_SPACE(sizeof(m->msg.fds))];
254         struct cmsghdr *cmsg;
255         int buflen = sizeof(*m) - sizeof(m->msg.fds);
256
257         memset(&msgh, 0, sizeof(msgh));
258         iov.iov_base = m;
259         iov.iov_len  = buflen;
260
261         msgh.msg_name = s;
262         msgh.msg_namelen = sizeof(*s);
263         msgh.msg_iov = &iov;
264         msgh.msg_iovlen = 1;
265         msgh.msg_control = control;
266         msgh.msg_controllen = sizeof(control);
267
268         msglen = recvmsg(mp_fd, &msgh, 0);
269         if (msglen < 0) {
270                 RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
271                 return -1;
272         }
273
274         if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
275                 RTE_LOG(ERR, EAL, "truncated msg\n");
276                 return -1;
277         }
278
279         /* read auxiliary FDs if any */
280         for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
281                 cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
282                 if ((cmsg->cmsg_level == SOL_SOCKET) &&
283                         (cmsg->cmsg_type == SCM_RIGHTS)) {
284                         memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));
285                         break;
286                 }
287         }
288
289         return 0;
290 }
291
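/* Dispatch one incoming message: replies (MP_REP/MP_IGN) are matched against
 * the pending request list, while requests and plain messages are handed to
 * the action callback registered under the message name.
 */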
292 static void
293 process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
294 {
295         struct pending_request *pending_req;
296         struct action_entry *entry;
297         struct rte_mp_msg *msg = &m->msg;
298         rte_mp_t action = NULL;
299
300         RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);
301
302         if (m->type == MP_REP || m->type == MP_IGN) {
303                 struct pending_request *req = NULL;
304
305                 pthread_mutex_lock(&pending_requests.lock);
306                 pending_req = find_pending_request(s->sun_path, msg->name);
307                 if (pending_req) {
308                         memcpy(pending_req->reply, msg, sizeof(*msg));
309                         /* -1 indicates that we've been asked to ignore */
310                         pending_req->reply_received =
311                                 m->type == MP_REP ? 1 : -1;
312
313                         if (pending_req->type == REQUEST_TYPE_SYNC)
314                                 pthread_cond_signal(&pending_req->sync.cond);
315                         else if (pending_req->type == REQUEST_TYPE_ASYNC)
316                                 req = async_reply_handle_thread_unsafe(
317                                                 pending_req);
318                 } else
319                         RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
320                 pthread_mutex_unlock(&pending_requests.lock);
321
322                 if (req != NULL)
323                         trigger_async_action(req);
324                 return;
325         }
326
327         pthread_mutex_lock(&mp_mutex_action);
328         entry = find_action_entry_by_name(msg->name);
329         if (entry != NULL)
330                 action = entry->action;
331         pthread_mutex_unlock(&mp_mutex_action);
332
333         if (!action) {
334                 if (m->type == MP_REQ && !internal_config.init_complete) {
335                         /* if this is a request, and init is not yet complete,
336                          * and callback wasn't registered, we should tell the
337                          * requester to ignore our existence because we're not
338                          * yet ready to process this request.
339                          */
340                         struct rte_mp_msg dummy;
341
342                         memset(&dummy, 0, sizeof(dummy));
343                         strlcpy(dummy.name, msg->name, sizeof(dummy.name));
344                         mp_send(&dummy, s->sun_path, MP_IGN);
345                 } else {
346                         RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
347                                 msg->name);
348                 }
349         } else if (action(msg, s->sun_path) < 0) {
350                 RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
351         }
352 }
353
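/* Main loop of the IPC control thread: keep receiving messages on the mp
 * socket and dispatching them.
 */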
354 static void *
355 mp_handle(void *arg __rte_unused)
356 {
357         struct mp_msg_internal msg;
358         struct sockaddr_un sa;
359
360         while (1) {
361                 if (read_msg(&msg, &sa) == 0)
362                         process_msg(&msg, &sa);
363         }
364
365         return NULL;
366 }
367
368 static int
369 timespec_cmp(const struct timespec *a, const struct timespec *b)
370 {
371         if (a->tv_sec < b->tv_sec)
372                 return -1;
373         if (a->tv_sec > b->tv_sec)
374                 return 1;
375         if (a->tv_nsec < b->tv_nsec)
376                 return -1;
377         if (a->tv_nsec > b->tv_nsec)
378                 return 1;
379         return 0;
380 }
381
382 enum async_action {
383         ACTION_FREE, /**< free the action entry, but don't trigger callback */
384         ACTION_TRIGGER /**< trigger callback, then free action entry */
385 };
386
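/* Fold one reply (or a timeout/ignore) into the async request's accumulated
 * user reply. Returns ACTION_TRIGGER once all expected responses have been
 * accounted for, otherwise ACTION_FREE.
 */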
387 static enum async_action
388 process_async_request(struct pending_request *sr, const struct timespec *now)
389 {
390         struct async_request_param *param;
391         struct rte_mp_reply *reply;
392         bool timeout, last_msg;
393
394         param = sr->async.param;
395         reply = &param->user_reply;
396
397         /* did we timeout? */
398         timeout = timespec_cmp(&param->end, now) <= 0;
399
400         /* if we received a response, adjust relevant data and copy message. */
401         if (sr->reply_received == 1 && sr->reply) {
402                 struct rte_mp_msg *msg, *user_msgs, *tmp;
403
404                 msg = sr->reply;
405                 user_msgs = reply->msgs;
406
407                 tmp = realloc(user_msgs, sizeof(*msg) *
408                                 (reply->nb_received + 1));
409                 if (!tmp) {
410                         RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
411                                 sr->dst, sr->request->name);
412                         /* this entry is going to be removed and its message
413                          * dropped, but we don't want to leak memory, so
414                          * continue.
415                          */
416                 } else {
417                         user_msgs = tmp;
418                         reply->msgs = user_msgs;
419                         memcpy(&user_msgs[reply->nb_received],
420                                         msg, sizeof(*msg));
421                         reply->nb_received++;
422                 }
423
424                 /* mark this request as processed */
425                 param->n_responses_processed++;
426         } else if (sr->reply_received == -1) {
427                 /* we were asked to ignore this process */
428                 reply->nb_sent--;
429         } else if (timeout) {
430                 /* count it as processed response, but don't increment
431                  * nb_received.
432                  */
433                 param->n_responses_processed++;
434         }
435
436         free(sr->reply);
437
438         last_msg = param->n_responses_processed == reply->nb_sent;
439
440         return last_msg ? ACTION_TRIGGER : ACTION_FREE;
441 }
442
443 static void
444 trigger_async_action(struct pending_request *sr)
445 {
446         struct async_request_param *param;
447         struct rte_mp_reply *reply;
448
449         param = sr->async.param;
450         reply = &param->user_reply;
451
452         param->clb(sr->request, reply);
453
454         /* clean up */
455         free(sr->async.param->user_reply.msgs);
456         free(sr->async.param);
457         free(sr->request);
458         free(sr);
459 }
460
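/* Handle a reply or timeout for an async request with pending_requests.lock
 * held. Returns the request when its completion callback should be triggered
 * by the caller, or NULL when the request has been freed.
 */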
461 static struct pending_request *
462 async_reply_handle_thread_unsafe(void *arg)
463 {
464         struct pending_request *req = (struct pending_request *)arg;
465         enum async_action action;
466         struct timespec ts_now;
467         struct timeval now;
468
469         if (gettimeofday(&now, NULL) < 0) {
470                 RTE_LOG(ERR, EAL, "Cannot get current time\n");
471                 goto no_trigger;
472         }
473         ts_now.tv_nsec = now.tv_usec * 1000;
474         ts_now.tv_sec = now.tv_sec;
475
476         action = process_async_request(req, &ts_now);
477
478         TAILQ_REMOVE(&pending_requests.requests, req, next);
479
480         if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
481                 /* if we failed to cancel the alarm because it's already in
482                  * progress, don't proceed because otherwise we will end up
483                  * handling the same message twice.
484                  */
485                 if (rte_errno == EINPROGRESS) {
486                         RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
487                         goto no_trigger;
488                 }
489                 RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");
490         }
491
492         if (action == ACTION_TRIGGER)
493                 return req;
494 no_trigger:
495         free(req);
496         return NULL;
497 }
498
499 static void
500 async_reply_handle(void *arg)
501 {
502         struct pending_request *req;
503
504         pthread_mutex_lock(&pending_requests.lock);
505         req = async_reply_handle_thread_unsafe(arg);
506         pthread_mutex_unlock(&pending_requests.lock);
507
508         if (req != NULL)
509                 trigger_async_action(req);
510 }
511
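/* Create and bind the per-process UNIX datagram socket: the primary uses the
 * base socket path, while a secondary appends a "<pid>_<tsc>" suffix.
 */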
512 static int
513 open_socket_fd(void)
514 {
515         struct sockaddr_un un;
516
517         peer_name[0] = '\0';
518         if (rte_eal_process_type() == RTE_PROC_SECONDARY)
519                 snprintf(peer_name, sizeof(peer_name),
520                                 "%d_%"PRIx64, getpid(), rte_rdtsc());
521
522         mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
523         if (mp_fd < 0) {
524                 RTE_LOG(ERR, EAL, "failed to create unix socket\n");
525                 return -1;
526         }
527
528         memset(&un, 0, sizeof(un));
529         un.sun_family = AF_UNIX;
530
531         create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));
532
533         unlink(un.sun_path); /* May still exist since last run */
534
535         if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
536                 RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
537                         un.sun_path, strerror(errno));
538                 close(mp_fd);
539                 return -1;
540         }
541
542         RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
543         return mp_fd;
544 }
545
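/* Close the mp socket and remove its filesystem entry; called from
 * rte_mp_channel_cleanup() when the EAL shuts down.
 */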
546 static void
547 close_socket_fd(void)
548 {
549         char path[PATH_MAX];
550
551         if (mp_fd < 0)
552                 return;
553
554         close(mp_fd);
555         create_socket_path(peer_name, path, sizeof(path));
556         unlink(path);
557 }
558
559 int
560 rte_mp_channel_init(void)
561 {
562         char path[PATH_MAX];
563         int dir_fd;
564         pthread_t mp_handle_tid;
565
566         /* in no shared files mode, we do not have secondary processes support,
567          * so no need to initialize IPC.
568          */
569         if (internal_config.no_shconf) {
570                 RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
571                 return 0;
572         }
573
574         /* create filter path */
575         create_socket_path("*", path, sizeof(path));
576         strlcpy(mp_filter, basename(path), sizeof(mp_filter));
577
578         /* path may have been modified, so recreate it */
579         create_socket_path("*", path, sizeof(path));
580         strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));
581
582         /* lock the directory */
583         dir_fd = open(mp_dir_path, O_RDONLY);
584         if (dir_fd < 0) {
585                 RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
586                         mp_dir_path, strerror(errno));
587                 return -1;
588         }
589
590         if (flock(dir_fd, LOCK_EX)) {
591                 RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
592                         mp_dir_path, strerror(errno));
593                 close(dir_fd);
594                 return -1;
595         }
596
597         if (open_socket_fd() < 0) {
598                 close(dir_fd);
599                 return -1;
600         }
601
602         if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
603                         NULL, mp_handle, NULL) < 0) {
604                 RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
605                         strerror(errno));
606                 close(mp_fd);
607                 close(dir_fd);
608                 mp_fd = -1;
609                 return -1;
610         }
611
612         /* unlock the directory */
613         flock(dir_fd, LOCK_UN);
614         close(dir_fd);
615
616         return 0;
617 }
618
619 void
620 rte_mp_channel_cleanup(void)
621 {
622         close_socket_fd();
623 }
624
625 /**
626  * Return -1 if sending the message failed due to an error on the local side.
627  * Return 0 if sending the message failed due to the remote side.
628  * Return 1 if the message was sent successfully.
629  *
630  */
631 static int
632 send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
633 {
634         int snd;
635         struct iovec iov;
636         struct msghdr msgh;
637         struct cmsghdr *cmsg;
638         struct sockaddr_un dst;
639         struct mp_msg_internal m;
640         int fd_size = msg->num_fds * sizeof(int);
641         char control[CMSG_SPACE(fd_size)];
642
643         m.type = type;
644         memcpy(&m.msg, msg, sizeof(*msg));
645
646         memset(&dst, 0, sizeof(dst));
647         dst.sun_family = AF_UNIX;
648         strlcpy(dst.sun_path, dst_path, sizeof(dst.sun_path));
649
650         memset(&msgh, 0, sizeof(msgh));
651         memset(control, 0, sizeof(control));
652
653         iov.iov_base = &m;
654         iov.iov_len = sizeof(m) - sizeof(msg->fds);
655
656         msgh.msg_name = &dst;
657         msgh.msg_namelen = sizeof(dst);
658         msgh.msg_iov = &iov;
659         msgh.msg_iovlen = 1;
660         msgh.msg_control = control;
661         msgh.msg_controllen = sizeof(control);
662
663         cmsg = CMSG_FIRSTHDR(&msgh);
664         cmsg->cmsg_len = CMSG_LEN(fd_size);
665         cmsg->cmsg_level = SOL_SOCKET;
666         cmsg->cmsg_type = SCM_RIGHTS;
667         memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);
668
669         do {
670                 snd = sendmsg(mp_fd, &msgh, 0);
671         } while (snd < 0 && errno == EINTR);
672
673         if (snd < 0) {
674                 rte_errno = errno;
675                 /* Check whether the failure is caused by the peer process having exited */
676                 if (errno == ECONNREFUSED &&
677                                 rte_eal_process_type() == RTE_PROC_PRIMARY) {
678                         unlink(dst_path);
679                         return 0;
680                 }
681                 if (errno == ENOBUFS) {
682                         RTE_LOG(ERR, EAL, "Peer cannot receive message %s\n",
683                                 dst_path);
684                         return 0;
685                 }
686                 RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
687                         dst_path, strerror(errno));
688                 return -1;
689         }
690
691         return 1;
692 }
693
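/* Send a message to a specific peer. With a NULL peer, a secondary process
 * sends to the primary socket, while the primary broadcasts to every
 * secondary socket found in mp_dir_path.
 */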
694 static int
695 mp_send(struct rte_mp_msg *msg, const char *peer, int type)
696 {
697         int dir_fd, ret = 0;
698         DIR *mp_dir;
699         struct dirent *ent;
700
701         if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
702                 peer = eal_mp_socket_path();
703
704         if (peer) {
705                 if (send_msg(peer, msg, type) < 0)
706                         return -1;
707                 else
708                         return 0;
709         }
710
711         /* broadcast to all secondary processes */
712         mp_dir = opendir(mp_dir_path);
713         if (!mp_dir) {
714                 RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
715                                 mp_dir_path);
716                 rte_errno = errno;
717                 return -1;
718         }
719
720         dir_fd = dirfd(mp_dir);
721         /* lock the directory to prevent processes spinning up while we send */
722         if (flock(dir_fd, LOCK_SH)) {
723                 RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
724                         mp_dir_path);
725                 rte_errno = errno;
726                 closedir(mp_dir);
727                 return -1;
728         }
729
730         while ((ent = readdir(mp_dir))) {
731                 char path[PATH_MAX];
732
733                 if (fnmatch(mp_filter, ent->d_name, 0) != 0)
734                         continue;
735
736                 snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
737                          ent->d_name);
738                 if (send_msg(path, msg, type) < 0)
739                         ret = -1;
740         }
741         /* unlock the dir */
742         flock(dir_fd, LOCK_UN);
743
744         /* dir_fd automatically closed on closedir */
745         closedir(mp_dir);
746         return ret;
747 }
748
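/* Validate a message before sending: non-NULL pointer, valid name, and
 * parameter length / fd count within the protocol limits.
 */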
749 static bool
750 check_input(const struct rte_mp_msg *msg)
751 {
752         if (msg == NULL) {
753                 RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
754                 rte_errno = EINVAL;
755                 return false;
756         }
757
758         if (validate_action_name(msg->name))
759                 return false;
760
761         if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
762                 RTE_LOG(ERR, EAL, "Message data is too long\n");
763                 rte_errno = E2BIG;
764                 return false;
765         }
766
767         if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
768                 RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
769                         RTE_MP_MAX_FD_NUM);
770                 rte_errno = E2BIG;
771                 return false;
772         }
773
774         return true;
775 }
776
777 int __rte_experimental
778 rte_mp_sendmsg(struct rte_mp_msg *msg)
779 {
780         if (!check_input(msg))
781                 return -1;
782
783         RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
784         return mp_send(msg, NULL, MP_MSG);
785 }
786
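/* Queue a single asynchronous request: send it, register a pending entry and
 * arm an alarm for the timeout. The caller must already hold
 * pending_requests.lock.
 */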
787 static int
788 mp_request_async(const char *dst, struct rte_mp_msg *req,
789                 struct async_request_param *param, const struct timespec *ts)
790 {
791         struct rte_mp_msg *reply_msg;
792         struct pending_request *pending_req, *exist;
793         int ret = -1;
794
795         pending_req = calloc(1, sizeof(*pending_req));
796         reply_msg = calloc(1, sizeof(*reply_msg));
797         if (pending_req == NULL || reply_msg == NULL) {
798                 RTE_LOG(ERR, EAL, "Could not allocate space for async request\n");
799                 rte_errno = ENOMEM;
800                 ret = -1;
801                 goto fail;
802         }
803
804         pending_req->type = REQUEST_TYPE_ASYNC;
805         strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
806         pending_req->request = req;
807         pending_req->reply = reply_msg;
808         pending_req->async.param = param;
809
810         /* queue already locked by caller */
811
812         exist = find_pending_request(dst, req->name);
813         if (exist) {
814                 RTE_LOG(ERR, EAL, "Request %s:%s is already pending\n", dst, req->name);
815                 rte_errno = EEXIST;
816                 ret = -1;
817                 goto fail;
818         }
819
820         ret = send_msg(dst, req, MP_REQ);
821         if (ret < 0) {
822                 RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
823                         dst, req->name);
824                 ret = -1;
825                 goto fail;
826         } else if (ret == 0) {
827                 ret = 0;
828                 goto fail;
829         }
830         param->user_reply.nb_sent++;
831
832         /* if setting the alarm fails, the request is dropped and any reply will be ignored */
833         if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
834                               async_reply_handle, pending_req) < 0) {
835                 RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
836                         dst, req->name);
837                 ret = -1;
838                 goto fail;
839         }
840         TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);
841
842         return 0;
843 fail:
844         free(pending_req);
845         free(reply_msg);
846         return ret;
847 }
848
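/* Send a single synchronous request and wait on a condition variable
 * (releasing pending_requests.lock while blocked) until a reply arrives or
 * the timeout expires; a received reply is appended to reply->msgs.
 */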
849 static int
850 mp_request_sync(const char *dst, struct rte_mp_msg *req,
851                struct rte_mp_reply *reply, const struct timespec *ts)
852 {
853         int ret;
854         struct rte_mp_msg msg, *tmp;
855         struct pending_request pending_req, *exist;
856
857         pending_req.type = REQUEST_TYPE_SYNC;
858         pending_req.reply_received = 0;
859         strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
860         pending_req.request = req;
861         pending_req.reply = &msg;
862         pthread_cond_init(&pending_req.sync.cond, NULL);
863
864         exist = find_pending_request(dst, req->name);
865         if (exist) {
866                 RTE_LOG(ERR, EAL, "Request %s:%s is already pending\n", dst, req->name);
867                 rte_errno = EEXIST;
868                 return -1;
869         }
870
871         ret = send_msg(dst, req, MP_REQ);
872         if (ret < 0) {
873                 RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
874                         dst, req->name);
875                 return -1;
876         } else if (ret == 0)
877                 return 0;
878
879         TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);
880
881         reply->nb_sent++;
882
883         do {
884                 ret = pthread_cond_timedwait(&pending_req.sync.cond,
885                                 &pending_requests.lock, ts);
886         } while (ret != 0 && ret != ETIMEDOUT);
887
888         TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);
889
890         if (pending_req.reply_received == 0) {
891                 RTE_LOG(ERR, EAL, "Fail to recv reply for request %s:%s\n",
892                         dst, req->name);
893                 rte_errno = ETIMEDOUT;
894                 return -1;
895         }
896         if (pending_req.reply_received == -1) {
897                 RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
898                 /* not receiving this message is not an error, so decrement
899                  * number of sent messages
900                  */
901                 reply->nb_sent--;
902                 return 0;
903         }
904
905         tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
906         if (!tmp) {
907                 RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
908                         dst, req->name);
909                 rte_errno = ENOMEM;
910                 return -1;
911         }
912         memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
913         reply->msgs = tmp;
914         reply->nb_received++;
915         return 0;
916 }
917
918 int __rte_experimental
919 rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
920                 const struct timespec *ts)
921 {
922         int dir_fd, ret = 0;
923         DIR *mp_dir;
924         struct dirent *ent;
925         struct timeval now;
926         struct timespec end;
927
928         RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
929
930         if (check_input(req) == false)
931                 return -1;
932
933         reply->nb_sent = 0;
934         reply->nb_received = 0;
935         reply->msgs = NULL;
936
937         if (internal_config.no_shconf) {
938                 RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
939                 return 0;
940         }
941
942         if (gettimeofday(&now, NULL) < 0) {
943                 RTE_LOG(ERR, EAL, "Failed to get current time\n");
944                 rte_errno = errno;
945                 return -1;
946         }
947
948         end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
949         end.tv_sec = now.tv_sec + ts->tv_sec +
950                         (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
951
952         /* for secondary process, send request to the primary process only */
953         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
954                 pthread_mutex_lock(&pending_requests.lock);
955                 ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
956                 pthread_mutex_unlock(&pending_requests.lock);
957                 return ret;
958         }
959
960         /* for primary process, broadcast request, and collect reply 1 by 1 */
961         mp_dir = opendir(mp_dir_path);
962         if (!mp_dir) {
963                 RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
964                 rte_errno = errno;
965                 return -1;
966         }
967
968         dir_fd = dirfd(mp_dir);
969         /* lock the directory to prevent processes spinning up while we send */
970         if (flock(dir_fd, LOCK_SH)) {
971                 RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
972                         mp_dir_path);
973                 closedir(mp_dir);
974                 rte_errno = errno;
975                 return -1;
976         }
977
978         pthread_mutex_lock(&pending_requests.lock);
979         while ((ent = readdir(mp_dir))) {
980                 char path[PATH_MAX];
981
982                 if (fnmatch(mp_filter, ent->d_name, 0) != 0)
983                         continue;
984
985                 snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
986                          ent->d_name);
987
988                 /* the mutex is released while waiting for the response and
989                  * re-acquired when the reply is received (or the wait times out)
990                  */
991                 if (mp_request_sync(path, req, reply, &end))
992                         ret = -1;
993         }
994         pthread_mutex_unlock(&pending_requests.lock);
995         /* unlock the directory */
996         flock(dir_fd, LOCK_UN);
997
998         /* dir_fd automatically closed on closedir */
999         closedir(mp_dir);
1000         return ret;
1001 }
1002
1003 int __rte_experimental
1004 rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
1005                 rte_mp_async_reply_t clb)
1006 {
1007         struct rte_mp_msg *copy;
1008         struct pending_request *dummy;
1009         struct async_request_param *param;
1010         struct rte_mp_reply *reply;
1011         int dir_fd, ret = 0;
1012         DIR *mp_dir;
1013         struct dirent *ent;
1014         struct timeval now;
1015         struct timespec *end;
1016         bool dummy_used = false;
1017
1018         RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
1019
1020         if (check_input(req) == false)
1021                 return -1;
1022
1023         if (internal_config.no_shconf) {
1024                 RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
1025                 return 0;
1026         }
1027
1028         if (gettimeofday(&now, NULL) < 0) {
1029                 RTE_LOG(ERR, EAL, "Failed to get current time\n");
1030                 rte_errno = errno;
1031                 return -1;
1032         }
1033         copy = calloc(1, sizeof(*copy));
1034         dummy = calloc(1, sizeof(*dummy));
1035         param = calloc(1, sizeof(*param));
1036         if (copy == NULL || dummy == NULL || param == NULL) {
1037                 RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
1038                 rte_errno = ENOMEM;
1039                 goto fail;
1040         }
1041
1042         /* copy message */
1043         memcpy(copy, req, sizeof(*copy));
1044
1045         param->n_responses_processed = 0;
1046         param->clb = clb;
1047         end = &param->end;
1048         reply = &param->user_reply;
1049
1050         end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
1051         end->tv_sec = now.tv_sec + ts->tv_sec +
1052                         (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
1053         reply->nb_sent = 0;
1054         reply->nb_received = 0;
1055         reply->msgs = NULL;
1056
1057         /* we have to lock the request queue here, as we will be adding a bunch
1058          * of requests to the queue at once, and some of the replies may arrive
1059          * before we add all of the requests to the queue.
1060          */
1061         pthread_mutex_lock(&pending_requests.lock);
1062
1063         /* we have to ensure that the callback gets triggered even if we don't
1064          * send anything, so a dummy request was allocated earlier. Fill it in,
1065          * and put it on the queue if we don't send any requests.
1066          */
1067         dummy->type = REQUEST_TYPE_ASYNC;
1068         dummy->request = copy;
1069         dummy->reply = NULL;
1070         dummy->async.param = param;
1071         dummy->reply_received = 1; /* short-circuit the timeout */
1072
1073         /* for secondary process, send request to the primary process only */
1074         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1075                 ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);
1076
1077                 /* if we didn't send anything, put dummy request on the queue */
1078                 if (ret == 0 && reply->nb_sent == 0) {
1079                         TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
1080                                         next);
1081                         dummy_used = true;
1082                 }
1083
1084                 pthread_mutex_unlock(&pending_requests.lock);
1085
1086                 /* if we couldn't send anything, clean up */
1087                 if (ret != 0)
1088                         goto fail;
1089                 return 0;
1090         }
1091
1092         /* for primary process, broadcast request */
1093         mp_dir = opendir(mp_dir_path);
1094         if (!mp_dir) {
1095                 RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
1096                 rte_errno = errno;
1097                 goto unlock_fail;
1098         }
1099         dir_fd = dirfd(mp_dir);
1100
1101         /* lock the directory to prevent processes spinning up while we send */
1102         if (flock(dir_fd, LOCK_SH)) {
1103                 RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
1104                         mp_dir_path);
1105                 rte_errno = errno;
1106                 goto closedir_fail;
1107         }
1108
1109         while ((ent = readdir(mp_dir))) {
1110                 char path[PATH_MAX];
1111
1112                 if (fnmatch(mp_filter, ent->d_name, 0) != 0)
1113                         continue;
1114
1115                 snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
1116                          ent->d_name);
1117
1118                 if (mp_request_async(path, copy, param, ts))
1119                         ret = -1;
1120         }
1121         /* if we didn't send anything, put dummy request on the queue */
1122         if (ret == 0 && reply->nb_sent == 0) {
1123                 TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
1124                 dummy_used = true;
1125         }
1126
1127         /* finally, unlock the queue */
1128         pthread_mutex_unlock(&pending_requests.lock);
1129
1130         /* unlock the directory */
1131         flock(dir_fd, LOCK_UN);
1132
1133         /* dir_fd automatically closed on closedir */
1134         closedir(mp_dir);
1135
1136         /* if dummy was unused, free it */
1137         if (!dummy_used)
1138                 free(dummy);
1139
1140         return ret;
1141 closedir_fail:
1142         closedir(mp_dir);
1143 unlock_fail:
1144         pthread_mutex_unlock(&pending_requests.lock);
1145 fail:
1146         free(dummy);
1147         free(param);
1148         free(copy);
1149         return -1;
1150 }
1151
1152 int __rte_experimental
1153 rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
1154 {
1155         RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
1156
1157         if (check_input(msg) == false)
1158                 return -1;
1159
1160         if (peer == NULL) {
1161                 RTE_LOG(ERR, EAL, "peer is not specified\n");
1162                 rte_errno = EINVAL;
1163                 return -1;
1164         }
1165
1166         if (internal_config.no_shconf) {
1167                 RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
1168                 return 0;
1169         }
1170
1171         return mp_send(msg, peer, MP_REP);
1172 }