ipc: fix send error handling
[dpdk.git] / lib/librte_eal/common/eal_common_proc.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <inttypes.h>
#include <libgen.h>
#include <limits.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_tailq.h>

#include "eal_private.h"
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"

static int mp_fd = -1;
static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
static char peer_name[PATH_MAX];

struct action_entry {
        TAILQ_ENTRY(action_entry) next;
        char action_name[RTE_MP_MAX_NAME_LEN];
        rte_mp_t action;
};

/** Doubly-linked list of actions. */
TAILQ_HEAD(action_entry_list, action_entry);

static struct action_entry_list action_entry_list =
        TAILQ_HEAD_INITIALIZER(action_entry_list);

enum mp_type {
        MP_MSG, /* Share message with peers, will not block */
        MP_REQ, /* Request for information, will block for a reply */
        MP_REP, /* Response to previously-received request */
        MP_IGN, /* Response telling requester to ignore this response */
};

struct mp_msg_internal {
        int type;
        struct rte_mp_msg msg;
};

struct async_request_param {
        rte_mp_async_reply_t clb;
        struct rte_mp_reply user_reply;
        struct timespec end;
        int n_responses_processed;
};

struct pending_request {
        TAILQ_ENTRY(pending_request) next;
        enum {
                REQUEST_TYPE_SYNC,
                REQUEST_TYPE_ASYNC
        } type;
        char dst[PATH_MAX];
        struct rte_mp_msg *request;
        struct rte_mp_msg *reply;
        int reply_received;
        RTE_STD_C11
        union {
                struct {
                        struct async_request_param *param;
                } async;
                struct {
                        pthread_cond_t cond;
                } sync;
        };
};

TAILQ_HEAD(pending_request_list, pending_request);

static struct {
        struct pending_request_list requests;
        pthread_mutex_t lock;
} pending_requests = {
        .requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
        .lock = PTHREAD_MUTEX_INITIALIZER,
        /**< used in async requests only */
};

/* forward declarations */
static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type);

/* for use with alarm callback */
static void
async_reply_handle(void *arg);

/* for use with process_msg */
static struct pending_request *
async_reply_handle_thread_unsafe(void *arg);

static void
trigger_async_action(struct pending_request *req);

static struct pending_request *
find_pending_request(const char *dst, const char *act_name)
{
        struct pending_request *r;

        TAILQ_FOREACH(r, &pending_requests.requests, next) {
                if (!strcmp(r->dst, dst) &&
                    !strcmp(r->request->name, act_name))
                        break;
        }

        return r;
}

static void
create_socket_path(const char *name, char *buf, int len)
{
        const char *prefix = eal_mp_socket_path();

        if (strlen(name) > 0)
                snprintf(buf, len, "%s_%s", prefix, name);
        else
                strlcpy(buf, prefix, len);
}

int
rte_eal_primary_proc_alive(const char *config_file_path)
{
        int config_fd;

        if (config_file_path)
                config_fd = open(config_file_path, O_RDONLY);
        else {
                const char *path;

                path = eal_runtime_config_path();
                config_fd = open(path, O_RDONLY);
        }
        if (config_fd < 0)
                return 0;

        int ret = lockf(config_fd, F_TEST, 0);
        close(config_fd);

        return !!ret;
}
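
/*
 * A minimal usage sketch of the liveness check above; the helper name
 * example_check_primary is hypothetical and not part of this file. Passing
 * NULL makes the check fall back to the default runtime config path.
 */
static int __rte_unused
example_check_primary(void)
{
        if (!rte_eal_primary_proc_alive(NULL)) {
                RTE_LOG(WARNING, EAL, "primary process appears to be gone\n");
                return -1;
        }
        return 0;
}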

static struct action_entry *
find_action_entry_by_name(const char *name)
{
        struct action_entry *entry;

        TAILQ_FOREACH(entry, &action_entry_list, next) {
                if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
                        break;
        }

        return entry;
}

static int
validate_action_name(const char *name)
{
        if (name == NULL) {
                RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
                RTE_LOG(ERR, EAL, "Length of action name is zero\n");
                rte_errno = EINVAL;
                return -1;
        }
        if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
                rte_errno = E2BIG;
                return -1;
        }
        return 0;
}

int __rte_experimental
rte_mp_action_register(const char *name, rte_mp_t action)
{
        struct action_entry *entry;

        if (validate_action_name(name))
                return -1;

        entry = malloc(sizeof(struct action_entry));
        if (entry == NULL) {
                rte_errno = ENOMEM;
                return -1;
        }
        strlcpy(entry->action_name, name, sizeof(entry->action_name));
        entry->action = action;

        pthread_mutex_lock(&mp_mutex_action);
        if (find_action_entry_by_name(name) != NULL) {
                pthread_mutex_unlock(&mp_mutex_action);
                rte_errno = EEXIST;
                free(entry);
                return -1;
        }
        TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
        pthread_mutex_unlock(&mp_mutex_action);
        return 0;
}
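
/*
 * A minimal usage sketch of action registration; the action name
 * "example_echo" and both helpers are hypothetical, not part of this file.
 * The handler matches rte_mp_t: it receives the request and the peer's
 * socket path, and sends back an empty reply.
 */
static int __rte_unused
example_echo_handler(const struct rte_mp_msg *msg, const void *peer)
{
        struct rte_mp_msg reply;

        memset(&reply, 0, sizeof(reply));
        strlcpy(reply.name, msg->name, sizeof(reply.name));
        /* the reply is routed back to the requesting peer's socket */
        return rte_mp_reply(&reply, (const char *)peer);
}

static int __rte_unused
example_register(void)
{
        /* registering the same name twice fails with rte_errno == EEXIST */
        return rte_mp_action_register("example_echo", example_echo_handler);
}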

void __rte_experimental
rte_mp_action_unregister(const char *name)
{
        struct action_entry *entry;

        if (validate_action_name(name))
                return;

        pthread_mutex_lock(&mp_mutex_action);
        entry = find_action_entry_by_name(name);
        if (entry == NULL) {
                pthread_mutex_unlock(&mp_mutex_action);
                return;
        }
        TAILQ_REMOVE(&action_entry_list, entry, next);
        pthread_mutex_unlock(&mp_mutex_action);
        free(entry);
}

static int
read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
        int msglen;
        struct iovec iov;
        struct msghdr msgh;
        char control[CMSG_SPACE(sizeof(m->msg.fds))];
        struct cmsghdr *cmsg;
        int buflen = sizeof(*m) - sizeof(m->msg.fds);

        memset(&msgh, 0, sizeof(msgh));
        iov.iov_base = m;
        iov.iov_len  = buflen;

        msgh.msg_name = s;
        msgh.msg_namelen = sizeof(*s);
        msgh.msg_iov = &iov;
        msgh.msg_iovlen = 1;
        msgh.msg_control = control;
        msgh.msg_controllen = sizeof(control);

        msglen = recvmsg(mp_fd, &msgh, 0);
        if (msglen < 0) {
                RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
                return -1;
        }

        if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
                RTE_LOG(ERR, EAL, "truncated msg\n");
                return -1;
        }

        /* read auxiliary FDs if any */
        for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
                cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
                if ((cmsg->cmsg_level == SOL_SOCKET) &&
                        (cmsg->cmsg_type == SCM_RIGHTS)) {
                        memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));
                        break;
                }
        }

        return 0;
}

static void
process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
        struct pending_request *pending_req;
        struct action_entry *entry;
        struct rte_mp_msg *msg = &m->msg;
        rte_mp_t action = NULL;

        RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);

        if (m->type == MP_REP || m->type == MP_IGN) {
                struct pending_request *req = NULL;

                pthread_mutex_lock(&pending_requests.lock);
                pending_req = find_pending_request(s->sun_path, msg->name);
                if (pending_req) {
                        memcpy(pending_req->reply, msg, sizeof(*msg));
                        /* -1 indicates that we've been asked to ignore */
                        pending_req->reply_received =
                                m->type == MP_REP ? 1 : -1;

                        if (pending_req->type == REQUEST_TYPE_SYNC)
                                pthread_cond_signal(&pending_req->sync.cond);
                        else if (pending_req->type == REQUEST_TYPE_ASYNC)
                                req = async_reply_handle_thread_unsafe(
                                                pending_req);
                } else
                        RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
                pthread_mutex_unlock(&pending_requests.lock);

                if (req != NULL)
                        trigger_async_action(req);
                return;
        }

        pthread_mutex_lock(&mp_mutex_action);
        entry = find_action_entry_by_name(msg->name);
        if (entry != NULL)
                action = entry->action;
        pthread_mutex_unlock(&mp_mutex_action);

        if (!action) {
                if (m->type == MP_REQ && !internal_config.init_complete) {
                        /* if this is a request, and init is not yet complete,
                         * and callback wasn't registered, we should tell the
                         * requester to ignore our existence because we're not
                         * yet ready to process this request.
                         */
                        struct rte_mp_msg dummy;

                        memset(&dummy, 0, sizeof(dummy));
                        strlcpy(dummy.name, msg->name, sizeof(dummy.name));
                        mp_send(&dummy, s->sun_path, MP_IGN);
                } else {
                        RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
                                msg->name);
                }
        } else if (action(msg, s->sun_path) < 0) {
                RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
        }
}

static void *
mp_handle(void *arg __rte_unused)
{
        struct mp_msg_internal msg;
        struct sockaddr_un sa;

        while (1) {
                if (read_msg(&msg, &sa) == 0)
                        process_msg(&msg, &sa);
        }

        return NULL;
}

static int
timespec_cmp(const struct timespec *a, const struct timespec *b)
{
        if (a->tv_sec < b->tv_sec)
                return -1;
        if (a->tv_sec > b->tv_sec)
                return 1;
        if (a->tv_nsec < b->tv_nsec)
                return -1;
        if (a->tv_nsec > b->tv_nsec)
                return 1;
        return 0;
}

enum async_action {
        ACTION_FREE, /**< free the action entry, but don't trigger callback */
        ACTION_TRIGGER /**< trigger callback, then free action entry */
};

static enum async_action
process_async_request(struct pending_request *sr, const struct timespec *now)
{
        struct async_request_param *param;
        struct rte_mp_reply *reply;
        bool timeout, last_msg;

        param = sr->async.param;
        reply = &param->user_reply;

        /* did we timeout? */
        timeout = timespec_cmp(&param->end, now) <= 0;

        /* if we received a response, adjust relevant data and copy message. */
        if (sr->reply_received == 1 && sr->reply) {
                struct rte_mp_msg *msg, *user_msgs, *tmp;

                msg = sr->reply;
                user_msgs = reply->msgs;

                tmp = realloc(user_msgs, sizeof(*msg) *
                                (reply->nb_received + 1));
                if (!tmp) {
                        RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
                                sr->dst, sr->request->name);
                        /* this entry is going to be removed and its message
                         * dropped, but we don't want to leak memory, so
                         * continue.
                         */
                } else {
                        user_msgs = tmp;
                        reply->msgs = user_msgs;
                        memcpy(&user_msgs[reply->nb_received],
                                        msg, sizeof(*msg));
                        reply->nb_received++;
                }

                /* mark this request as processed */
                param->n_responses_processed++;
        } else if (sr->reply_received == -1) {
                /* we were asked to ignore this process */
                reply->nb_sent--;
        } else if (timeout) {
                /* count it as processed response, but don't increment
                 * nb_received.
                 */
                param->n_responses_processed++;
        }

        free(sr->reply);

        last_msg = param->n_responses_processed == reply->nb_sent;

        return last_msg ? ACTION_TRIGGER : ACTION_FREE;
}

static void
trigger_async_action(struct pending_request *sr)
{
        struct async_request_param *param;
        struct rte_mp_reply *reply;

        param = sr->async.param;
        reply = &param->user_reply;

        param->clb(sr->request, reply);

        /* clean up */
        free(sr->async.param->user_reply.msgs);
        free(sr->async.param);
        free(sr->request);
        free(sr);
}

static struct pending_request *
async_reply_handle_thread_unsafe(void *arg)
{
        struct pending_request *req = (struct pending_request *)arg;
        enum async_action action;
        struct timespec ts_now;
        struct timeval now;

        if (gettimeofday(&now, NULL) < 0) {
                RTE_LOG(ERR, EAL, "Cannot get current time\n");
                goto no_trigger;
        }
        ts_now.tv_nsec = now.tv_usec * 1000;
        ts_now.tv_sec = now.tv_sec;

        action = process_async_request(req, &ts_now);

        TAILQ_REMOVE(&pending_requests.requests, req, next);

        if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
                /* if we failed to cancel the alarm because it's already in
                 * progress, don't proceed because otherwise we will end up
                 * handling the same message twice.
                 */
                if (rte_errno == EINPROGRESS) {
                        RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
                        goto no_trigger;
                }
                RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");
        }

        if (action == ACTION_TRIGGER)
                return req;
no_trigger:
        free(req);
        return NULL;
}

static void
async_reply_handle(void *arg)
{
        struct pending_request *req;

        pthread_mutex_lock(&pending_requests.lock);
        req = async_reply_handle_thread_unsafe(arg);
        pthread_mutex_unlock(&pending_requests.lock);

        if (req != NULL)
                trigger_async_action(req);
}

static int
open_socket_fd(void)
{
        struct sockaddr_un un;

        peer_name[0] = '\0';
        if (rte_eal_process_type() == RTE_PROC_SECONDARY)
                snprintf(peer_name, sizeof(peer_name),
                                "%d_%"PRIx64, getpid(), rte_rdtsc());

        mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
        if (mp_fd < 0) {
                RTE_LOG(ERR, EAL, "failed to create unix socket\n");
                return -1;
        }

        memset(&un, 0, sizeof(un));
        un.sun_family = AF_UNIX;

        create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));

        unlink(un.sun_path); /* May still exist since last run */

        if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
                RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
                        un.sun_path, strerror(errno));
                close(mp_fd);
                return -1;
        }

        RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
        return mp_fd;
}

static void
close_socket_fd(void)
{
        char path[PATH_MAX];

        if (mp_fd < 0)
                return;

        close(mp_fd);
        create_socket_path(peer_name, path, sizeof(path));
        unlink(path);
}

int
rte_mp_channel_init(void)
{
        char path[PATH_MAX];
        int dir_fd;
        pthread_t mp_handle_tid;

        /* in no shared files mode, we do not have secondary processes support,
         * so no need to initialize IPC.
         */
        if (internal_config.no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
                return 0;
        }

        /* create filter path */
        create_socket_path("*", path, sizeof(path));
        strlcpy(mp_filter, basename(path), sizeof(mp_filter));

        /* path may have been modified, so recreate it */
        create_socket_path("*", path, sizeof(path));
        strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));

        /* lock the directory */
        dir_fd = open(mp_dir_path, O_RDONLY);
        if (dir_fd < 0) {
                RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
                        mp_dir_path, strerror(errno));
                return -1;
        }

        if (flock(dir_fd, LOCK_EX)) {
                RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
                        mp_dir_path, strerror(errno));
                close(dir_fd);
                return -1;
        }

        if (open_socket_fd() < 0) {
                close(dir_fd);
                return -1;
        }

        if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
                        NULL, mp_handle, NULL) < 0) {
                RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
                        strerror(errno));
                close(mp_fd);
                close(dir_fd);
                mp_fd = -1;
                return -1;
        }

        /* unlock the directory */
        flock(dir_fd, LOCK_UN);
        close(dir_fd);

        return 0;
}

void
rte_mp_channel_cleanup(void)
{
        close_socket_fd();
}

/**
 * Return -1 if we failed to send the message and the failure is on the
 * local side; return 0 if we failed to send it due to the remote side
 * (e.g. the peer process has exited); return 1 if the message was sent
 * successfully.
 */
static int
send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
{
        int snd;
        struct iovec iov;
        struct msghdr msgh;
        struct cmsghdr *cmsg;
        struct sockaddr_un dst;
        struct mp_msg_internal m;
        int fd_size = msg->num_fds * sizeof(int);
        char control[CMSG_SPACE(fd_size)];

        m.type = type;
        memcpy(&m.msg, msg, sizeof(*msg));

        memset(&dst, 0, sizeof(dst));
        dst.sun_family = AF_UNIX;
        strlcpy(dst.sun_path, dst_path, sizeof(dst.sun_path));

        memset(&msgh, 0, sizeof(msgh));
        memset(control, 0, sizeof(control));

        iov.iov_base = &m;
        iov.iov_len = sizeof(m) - sizeof(msg->fds);

        msgh.msg_name = &dst;
        msgh.msg_namelen = sizeof(dst);
        msgh.msg_iov = &iov;
        msgh.msg_iovlen = 1;
        msgh.msg_control = control;
        msgh.msg_controllen = sizeof(control);

        cmsg = CMSG_FIRSTHDR(&msgh);
        cmsg->cmsg_len = CMSG_LEN(fd_size);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);

        do {
                snd = sendmsg(mp_fd, &msgh, 0);
        } while (snd < 0 && errno == EINTR);

        if (snd < 0) {
                rte_errno = errno;
                /* Check if the failure is caused by the peer process exiting */
                if (errno == ECONNREFUSED &&
                                rte_eal_process_type() == RTE_PROC_PRIMARY) {
                        unlink(dst_path);
                        return 0;
                }
                RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
                        dst_path, strerror(errno));
                return -1;
        }

        return 1;
}

static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type)
{
        int dir_fd, ret = 0;
        DIR *mp_dir;
        struct dirent *ent;

        if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
                peer = eal_mp_socket_path();

        if (peer) {
                if (send_msg(peer, msg, type) < 0)
                        return -1;
                else
                        return 0;
        }

        /* broadcast to all secondary processes */
        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
                                mp_dir_path);
                rte_errno = errno;
                return -1;
        }

        dir_fd = dirfd(mp_dir);
        /* lock the directory to prevent processes spinning up while we send */
        if (flock(dir_fd, LOCK_SH)) {
                RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
                        mp_dir_path);
                rte_errno = errno;
                closedir(mp_dir);
                return -1;
        }

        while ((ent = readdir(mp_dir))) {
                char path[PATH_MAX];

                if (fnmatch(mp_filter, ent->d_name, 0) != 0)
                        continue;

                snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
                         ent->d_name);
                if (send_msg(path, msg, type) < 0)
                        ret = -1;
        }
        /* unlock the dir */
        flock(dir_fd, LOCK_UN);

        /* dir_fd automatically closed on closedir */
        closedir(mp_dir);
        return ret;
}

static bool
check_input(const struct rte_mp_msg *msg)
{
        if (msg == NULL) {
                RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
                rte_errno = EINVAL;
                return false;
        }

        if (validate_action_name(msg->name))
                return false;

        if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
                RTE_LOG(ERR, EAL, "Message data is too long\n");
                rte_errno = E2BIG;
                return false;
        }

        if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
                RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
                        RTE_MP_MAX_FD_NUM);
                rte_errno = E2BIG;
                return false;
        }

        return true;
}

int __rte_experimental
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
        if (!check_input(msg))
                return -1;

        RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
        return mp_send(msg, NULL, MP_MSG);
}
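
/*
 * A minimal usage sketch of rte_mp_sendmsg(); the action name
 * "example_notify" and this helper are hypothetical. A plain message is
 * fire-and-forget: it neither blocks nor expects a reply, unlike the
 * request/reply calls below.
 */
static int __rte_unused
example_notify(uint32_t value)
{
        struct rte_mp_msg msg;

        memset(&msg, 0, sizeof(msg));
        strlcpy(msg.name, "example_notify", sizeof(msg.name));
        /* payload travels in msg.param, up to RTE_MP_MAX_PARAM_LEN bytes */
        memcpy(msg.param, &value, sizeof(value));
        msg.len_param = sizeof(value);
        /* msg.num_fds stays 0: no file descriptors attached */
        return rte_mp_sendmsg(&msg);
}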

static int
mp_request_async(const char *dst, struct rte_mp_msg *req,
                struct async_request_param *param, const struct timespec *ts)
{
        struct rte_mp_msg *reply_msg;
        struct pending_request *pending_req, *exist;
        int ret = -1;

        pending_req = calloc(1, sizeof(*pending_req));
        reply_msg = calloc(1, sizeof(*reply_msg));
        if (pending_req == NULL || reply_msg == NULL) {
                RTE_LOG(ERR, EAL, "Could not allocate space for async request\n");
                rte_errno = ENOMEM;
                ret = -1;
                goto fail;
        }

        pending_req->type = REQUEST_TYPE_ASYNC;
        strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
        pending_req->request = req;
        pending_req->reply = reply_msg;
        pending_req->async.param = param;

        /* queue already locked by caller */

        exist = find_pending_request(dst, req->name);
        if (exist) {
                RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
                rte_errno = EEXIST;
                ret = -1;
                goto fail;
        }

        ret = send_msg(dst, req, MP_REQ);
        if (ret < 0) {
                RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
                        dst, req->name);
                ret = -1;
                goto fail;
        } else if (ret == 0) {
                ret = 0;
                goto fail;
        }
        param->user_reply.nb_sent++;

        /* if we fail to set the alarm, the request is dropped and any
         * late reply will be ignored
         */
        if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
                              async_reply_handle, pending_req) < 0) {
                RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
                        dst, req->name);
                ret = -1;
                goto fail;
        }
        TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);

        return 0;
fail:
        free(pending_req);
        free(reply_msg);
        return ret;
}

static int
mp_request_sync(const char *dst, struct rte_mp_msg *req,
               struct rte_mp_reply *reply, const struct timespec *ts)
{
        int ret;
        struct rte_mp_msg msg, *tmp;
        struct pending_request pending_req, *exist;

        pending_req.type = REQUEST_TYPE_SYNC;
        pending_req.reply_received = 0;
        strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
        pending_req.request = req;
        pending_req.reply = &msg;
        pthread_cond_init(&pending_req.sync.cond, NULL);

        exist = find_pending_request(dst, req->name);
        if (exist) {
                RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
                rte_errno = EEXIST;
                return -1;
        }

        ret = send_msg(dst, req, MP_REQ);
        if (ret < 0) {
                RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
                        dst, req->name);
                return -1;
        } else if (ret == 0)
                return 0;

        TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);

        reply->nb_sent++;

        do {
                ret = pthread_cond_timedwait(&pending_req.sync.cond,
                                &pending_requests.lock, ts);
        } while (ret != 0 && ret != ETIMEDOUT);

        TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);

        if (pending_req.reply_received == 0) {
                RTE_LOG(ERR, EAL, "Fail to recv reply for request %s:%s\n",
                        dst, req->name);
                rte_errno = ETIMEDOUT;
                return -1;
        }
        if (pending_req.reply_received == -1) {
                RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
                /* not receiving this message is not an error, so decrement
                 * number of sent messages
                 */
                reply->nb_sent--;
                return 0;
        }

        tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
        if (!tmp) {
                RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
                        dst, req->name);
                rte_errno = ENOMEM;
                return -1;
        }
        memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
        reply->msgs = tmp;
        reply->nb_received++;
        return 0;
}

int __rte_experimental
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
                const struct timespec *ts)
{
        int dir_fd, ret = 0;
        DIR *mp_dir;
        struct dirent *ent;
        struct timeval now;
        struct timespec end;

        RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

        reply->nb_sent = 0;
        reply->nb_received = 0;
        reply->msgs = NULL;

        if (check_input(req) == false)
                goto err;

        if (internal_config.no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                return 0;
        }

        if (gettimeofday(&now, NULL) < 0) {
                RTE_LOG(ERR, EAL, "Failed to get current time\n");
                rte_errno = errno;
                goto err;
        }

        end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
        end.tv_sec = now.tv_sec + ts->tv_sec +
                        (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;

        /* for secondary process, send request to the primary process only */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                pthread_mutex_lock(&pending_requests.lock);
                ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
                pthread_mutex_unlock(&pending_requests.lock);
                if (ret)
                        goto err;
                return ret;
        }

        /* for primary process, broadcast request, and collect replies one by
         * one
         */
        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
                rte_errno = errno;
                goto err;
        }

        dir_fd = dirfd(mp_dir);
        /* lock the directory to prevent processes spinning up while we send */
        if (flock(dir_fd, LOCK_SH)) {
                RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
                        mp_dir_path);
                closedir(mp_dir);
                rte_errno = errno;
                goto err;
        }

        pthread_mutex_lock(&pending_requests.lock);
        while ((ent = readdir(mp_dir))) {
                char path[PATH_MAX];

                if (fnmatch(mp_filter, ent->d_name, 0) != 0)
                        continue;

                snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
                         ent->d_name);

                /* unlocks the mutex while waiting for response,
                 * locks on receive
                 */
                if (mp_request_sync(path, req, reply, &end)) {
                        /* release the queue lock and the directory lock
                         * before bailing out, or subsequent IPC calls
                         * would deadlock
                         */
                        pthread_mutex_unlock(&pending_requests.lock);
                        flock(dir_fd, LOCK_UN);
                        closedir(mp_dir);
                        goto err;
                }
        }
        pthread_mutex_unlock(&pending_requests.lock);
        /* unlock the directory */
        flock(dir_fd, LOCK_UN);

        /* dir_fd automatically closed on closedir */
        closedir(mp_dir);
        return ret;

err:
        free(reply->msgs);
        reply->nb_received = 0;
        reply->msgs = NULL;
        return -1;
}
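
/*
 * A minimal usage sketch of a synchronous request with a 5-second timeout;
 * the action name "example_echo" and this helper are hypothetical. On
 * success the caller owns reply.msgs and must free it, even when
 * nb_received is 0.
 */
static int __rte_unused
example_request_sync(void)
{
        struct rte_mp_msg req;
        struct rte_mp_reply reply;
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};
        int i;

        memset(&req, 0, sizeof(req));
        memset(&reply, 0, sizeof(reply));
        strlcpy(req.name, "example_echo", sizeof(req.name));

        if (rte_mp_request_sync(&req, &reply, &ts) < 0)
                return -1;

        for (i = 0; i < reply.nb_received; i++)
                RTE_LOG(DEBUG, EAL, "reply %d: %s\n", i, reply.msgs[i].name);

        free(reply.msgs);
        return 0;
}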

int __rte_experimental
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
                rte_mp_async_reply_t clb)
{
        struct rte_mp_msg *copy;
        struct pending_request *dummy;
        struct async_request_param *param;
        struct rte_mp_reply *reply;
        int dir_fd, ret = 0;
        DIR *mp_dir;
        struct dirent *ent;
        struct timeval now;
        struct timespec *end;
        bool dummy_used = false;

        RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

        if (check_input(req) == false)
                return -1;

        if (internal_config.no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                return 0;
        }

        if (gettimeofday(&now, NULL) < 0) {
                RTE_LOG(ERR, EAL, "Failed to get current time\n");
                rte_errno = errno;
                return -1;
        }
        copy = calloc(1, sizeof(*copy));
        dummy = calloc(1, sizeof(*dummy));
        param = calloc(1, sizeof(*param));
        if (copy == NULL || dummy == NULL || param == NULL) {
                RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
                rte_errno = ENOMEM;
                goto fail;
        }

        /* copy message */
        memcpy(copy, req, sizeof(*copy));

        param->n_responses_processed = 0;
        param->clb = clb;
        end = &param->end;
        reply = &param->user_reply;

        end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
        end->tv_sec = now.tv_sec + ts->tv_sec +
                        (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
        reply->nb_sent = 0;
        reply->nb_received = 0;
        reply->msgs = NULL;

        /* we have to lock the request queue here, as we will be adding a bunch
         * of requests to the queue at once, and some of the replies may arrive
         * before we add all of the requests to the queue.
         */
        pthread_mutex_lock(&pending_requests.lock);

        /* we have to ensure that the callback gets triggered even if we don't
         * send anything, therefore we have allocated a dummy request earlier.
         * Fill it, and put it on the queue if we don't send any requests.
         */
        dummy->type = REQUEST_TYPE_ASYNC;
        dummy->request = copy;
        dummy->reply = NULL;
        dummy->async.param = param;
        dummy->reply_received = 1; /* short-circuit the timeout */

        /* for secondary process, send request to the primary process only */
        if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
                ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);

                /* if we didn't send anything, put dummy request on the queue */
                if (ret == 0 && reply->nb_sent == 0) {
                        TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
                                        next);
                        dummy_used = true;
                }

                pthread_mutex_unlock(&pending_requests.lock);

                /* if we couldn't send anything, clean up */
                if (ret != 0)
                        goto fail;
                return 0;
        }

        /* for primary process, broadcast request */
        mp_dir = opendir(mp_dir_path);
        if (!mp_dir) {
                RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
                rte_errno = errno;
                goto unlock_fail;
        }
        dir_fd = dirfd(mp_dir);

        /* lock the directory to prevent processes spinning up while we send */
        if (flock(dir_fd, LOCK_SH)) {
                RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
                        mp_dir_path);
                rte_errno = errno;
                goto closedir_fail;
        }

        while ((ent = readdir(mp_dir))) {
                char path[PATH_MAX];

                if (fnmatch(mp_filter, ent->d_name, 0) != 0)
                        continue;

                snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
                         ent->d_name);

                if (mp_request_async(path, copy, param, ts))
                        ret = -1;
        }
        /* if we didn't send anything, put dummy request on the queue */
        if (ret == 0 && reply->nb_sent == 0) {
                TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
                dummy_used = true;
        }

        /* finally, unlock the queue */
        pthread_mutex_unlock(&pending_requests.lock);

        /* unlock the directory */
        flock(dir_fd, LOCK_UN);

        /* dir_fd automatically closed on closedir */
        closedir(mp_dir);

        /* if dummy was unused, free it */
        if (!dummy_used)
                free(dummy);

        return ret;
closedir_fail:
        closedir(mp_dir);
unlock_fail:
        pthread_mutex_unlock(&pending_requests.lock);
fail:
        free(dummy);
        free(param);
        free(copy);
        return -1;
}
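
/*
 * A minimal usage sketch of an asynchronous request; all example_* names
 * are hypothetical. The callback runs later, from the IPC or alarm thread,
 * once every peer has replied or the timeout has passed; reply->msgs is
 * freed by the IPC layer after the callback returns, so it must not be
 * stored.
 */
static int __rte_unused
example_async_done(const struct rte_mp_msg *request,
                const struct rte_mp_reply *reply)
{
        RTE_LOG(DEBUG, EAL, "request %s: %d of %d replies\n",
                request->name, reply->nb_received, reply->nb_sent);
        return 0;
}

static int __rte_unused
example_request_async(void)
{
        struct rte_mp_msg req;
        struct timespec ts = {.tv_sec = 5, .tv_nsec = 0};

        memset(&req, 0, sizeof(req));
        strlcpy(req.name, "example_echo", sizeof(req.name));
        return rte_mp_request_async(&req, &ts, example_async_done);
}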

int __rte_experimental
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
        RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);

        if (check_input(msg) == false)
                return -1;

        if (peer == NULL) {
                RTE_LOG(ERR, EAL, "peer is not specified\n");
                rte_errno = EINVAL;
                return -1;
        }

        if (internal_config.no_shconf) {
                RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
                return 0;
        }

        return mp_send(msg, peer, MP_REP);
}
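
/*
 * A minimal usage sketch of replying with a file descriptor attached, e.g.
 * a memfd shared with the requesting peer; the helper is hypothetical. The
 * fds array travels out-of-band via SCM_RIGHTS, see send_msg() above.
 */
static int __rte_unused
example_reply_with_fd(const struct rte_mp_msg *request, const void *peer,
                int fd)
{
        struct rte_mp_msg reply;

        memset(&reply, 0, sizeof(reply));
        strlcpy(reply.name, request->name, sizeof(reply.name));
        reply.fds[0] = fd;
        reply.num_fds = 1;
        return rte_mp_reply(&reply, (const char *)peer);
}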