/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
#include <sys/types.h>
#include <sys/socket.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_tailq.h>

#include "eal_private.h"
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"
static int mp_fd = -1;
static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
static char peer_name[PATH_MAX];
	TAILQ_ENTRY(action_entry) next;
	char action_name[RTE_MP_MAX_NAME_LEN];

/** Doubly linked list of actions. */
TAILQ_HEAD(action_entry_list, action_entry);

static struct action_entry_list action_entry_list =
	TAILQ_HEAD_INITIALIZER(action_entry_list);
	MP_MSG, /* Share message with peers, will not block */
	MP_REQ, /* Request for information, will block for a reply */
	MP_REP, /* Response to previously-received request */
	MP_IGN, /* Response telling requester to ignore this response */
struct mp_msg_internal {
	struct rte_mp_msg msg;

struct async_request_param {
	rte_mp_async_reply_t clb;
	struct rte_mp_reply user_reply;
	int n_responses_processed;

struct pending_request {
	TAILQ_ENTRY(pending_request) next;
	struct rte_mp_msg *request;
	struct rte_mp_msg *reply;
	struct async_request_param *param;

TAILQ_HEAD(pending_request_list, pending_request);

	struct pending_request_list requests;
} pending_requests = {
	.requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
	.lock = PTHREAD_MUTEX_INITIALIZER,
	/**< used in async requests only */
/* forward declarations */
mp_send(struct rte_mp_msg *msg, const char *peer, int type);

/* for use with alarm callback */
async_reply_handle(void *arg);

/* for use with process_msg */
static struct pending_request *
async_reply_handle_thread_unsafe(void *arg);

trigger_async_action(struct pending_request *req);

static struct pending_request *
find_pending_request(const char *dst, const char *act_name)
	struct pending_request *r;

	TAILQ_FOREACH(r, &pending_requests.requests, next) {
		if (!strcmp(r->dst, dst) &&
		    !strcmp(r->request->name, act_name))
create_socket_path(const char *name, char *buf, int len)
	const char *prefix = eal_mp_socket_path();

	if (strlen(name) > 0)
		snprintf(buf, len, "%s_%s", prefix, name);
		strlcpy(buf, prefix, len);
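/*
 * Path naming sketch (assuming the usual EAL runtime directory layout, which
 * is not shown in this fragment): the primary process binds the plain
 * eal_mp_socket_path(), e.g. "<runtime_dir>/mp_socket", while each secondary
 * process appends its peer name, e.g. "<runtime_dir>/mp_socket_<pid>_<rdtsc>".
 */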
rte_eal_primary_proc_alive(const char *config_file_path)
	if (config_file_path)
		config_fd = open(config_file_path, O_RDONLY);

		path = eal_runtime_config_path();
		config_fd = open(path, O_RDONLY);

	int ret = lockf(config_fd, F_TEST, 0);
static struct action_entry *
find_action_entry_by_name(const char *name)
	struct action_entry *entry;

	TAILQ_FOREACH(entry, &action_entry_list, next) {
		if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)

validate_action_name(const char *name)
		RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");

	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
		RTE_LOG(ERR, EAL, "Length of action name is zero\n");

	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
rte_mp_action_register(const char *name, rte_mp_t action)
	struct action_entry *entry;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (validate_action_name(name) != 0)

	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");

	entry = malloc(sizeof(struct action_entry));
	strlcpy(entry->action_name, name, sizeof(entry->action_name));
	entry->action = action;

	pthread_mutex_lock(&mp_mutex_action);
	if (find_action_entry_by_name(name) != NULL) {
		pthread_mutex_unlock(&mp_mutex_action);

	TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
	pthread_mutex_unlock(&mp_mutex_action);
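/*
 * Usage sketch (handler and action names below are hypothetical, not part of
 * this file): an application registers a callback that the IPC handler
 * thread invokes whenever a message with a matching name arrives.
 *
 *	static int
 *	my_vf_handler(const struct rte_mp_msg *msg, const void *peer)
 *	{
 *		struct rte_mp_msg reply;
 *
 *		memset(&reply, 0, sizeof(reply));
 *		strlcpy(reply.name, msg->name, sizeof(reply.name));
 *		return rte_mp_reply(&reply, peer);
 *	}
 *
 *	if (rte_mp_action_register("my_vf_action", my_vf_handler) < 0)
 *		RTE_LOG(ERR, EAL, "cannot register IPC action\n");
 */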
rte_mp_action_unregister(const char *name)
	struct action_entry *entry;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (validate_action_name(name) != 0)

	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");

	pthread_mutex_lock(&mp_mutex_action);
	entry = find_action_entry_by_name(name);
		pthread_mutex_unlock(&mp_mutex_action);
	TAILQ_REMOVE(&action_entry_list, entry, next);
	pthread_mutex_unlock(&mp_mutex_action);
read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
	char control[CMSG_SPACE(sizeof(m->msg.fds))];
	struct cmsghdr *cmsg;
	int buflen = sizeof(*m) - sizeof(m->msg.fds);

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_len = buflen;

	msgh.msg_namelen = sizeof(*s);
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	msglen = recvmsg(mp_fd, &msgh, 0);
		RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));

	if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		RTE_LOG(ERR, EAL, "truncated msg\n");

	/* read auxiliary FDs if any */
	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));

	/* sanity-check the response */
	if (m->msg.num_fds < 0 || m->msg.num_fds > RTE_MP_MAX_FD_NUM) {
		RTE_LOG(ERR, EAL, "invalid number of fds received\n");

	if (m->msg.len_param < 0 || m->msg.len_param > RTE_MP_MAX_PARAM_LEN) {
		RTE_LOG(ERR, EAL, "invalid received data length\n");
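/*
 * Note on the wire format (descriptive, based on the code above): each
 * datagram carries a struct mp_msg_internal minus the fds array in its
 * payload, while any file descriptors travel separately as SCM_RIGHTS
 * ancillary data and are copied back into m->msg.fds here.
 */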
process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
	struct pending_request *pending_req;
	struct action_entry *entry;
	struct rte_mp_msg *msg = &m->msg;
	rte_mp_t action = NULL;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);

	if (m->type == MP_REP || m->type == MP_IGN) {
		struct pending_request *req = NULL;

		pthread_mutex_lock(&pending_requests.lock);
		pending_req = find_pending_request(s->sun_path, msg->name);
			memcpy(pending_req->reply, msg, sizeof(*msg));
			/* -1 indicates that we've been asked to ignore */
			pending_req->reply_received =
				m->type == MP_REP ? 1 : -1;

			if (pending_req->type == REQUEST_TYPE_SYNC)
				pthread_cond_signal(&pending_req->sync.cond);
			else if (pending_req->type == REQUEST_TYPE_ASYNC)
				req = async_reply_handle_thread_unsafe(
			RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
		pthread_mutex_unlock(&pending_requests.lock);

			trigger_async_action(req);

	pthread_mutex_lock(&mp_mutex_action);
	entry = find_action_entry_by_name(msg->name);
		action = entry->action;
	pthread_mutex_unlock(&mp_mutex_action);

		if (m->type == MP_REQ && !internal_conf->init_complete) {
			/* if this is a request, and init is not yet complete,
			 * and callback wasn't registered, we should tell the
			 * requester to ignore our existence because we're not
			 * yet ready to process this request.
			struct rte_mp_msg dummy;

			memset(&dummy, 0, sizeof(dummy));
			strlcpy(dummy.name, msg->name, sizeof(dummy.name));
			mp_send(&dummy, s->sun_path, MP_IGN);
			RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
				msg->name);
	} else if (action(msg, s->sun_path) < 0) {
		RTE_LOG(ERR, EAL, "Failed to handle message: %s\n", msg->name);
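/*
 * Dispatch summary (descriptive): replies and ignore notifications are
 * matched against the pending request list keyed by (peer socket path,
 * message name); anything else is routed to the action callback registered
 * under the message name, or answered with MP_IGN while init is incomplete.
 */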
mp_handle(void *arg __rte_unused)
	struct mp_msg_internal msg;
	struct sockaddr_un sa;

		if (read_msg(&msg, &sa) == 0)
			process_msg(&msg, &sa);
timespec_cmp(const struct timespec *a, const struct timespec *b)
	if (a->tv_sec < b->tv_sec)
	if (a->tv_sec > b->tv_sec)
	if (a->tv_nsec < b->tv_nsec)
	if (a->tv_nsec > b->tv_nsec)
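/*
 * Descriptive note: timespec_cmp() follows the usual comparator convention,
 * returning a negative value, zero, or a positive value when *a is earlier
 * than, equal to, or later than *b, respectively.
 */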
	ACTION_FREE, /**< free the action entry, but don't trigger callback */
	ACTION_TRIGGER /**< trigger callback, then free action entry */
static enum async_action
process_async_request(struct pending_request *sr, const struct timespec *now)
	struct async_request_param *param;
	struct rte_mp_reply *reply;
	bool timeout, last_msg;

	param = sr->async.param;
	reply = &param->user_reply;

	/* did we timeout? */
	timeout = timespec_cmp(&param->end, now) <= 0;

	/* if we received a response, adjust relevant data and copy the message. */
	if (sr->reply_received == 1 && sr->reply) {
		struct rte_mp_msg *msg, *user_msgs, *tmp;

		user_msgs = reply->msgs;

		tmp = realloc(user_msgs, sizeof(*msg) *
				(reply->nb_received + 1));
			RTE_LOG(ERR, EAL, "Failed to alloc reply for request %s:%s\n",
				sr->dst, sr->request->name);
			/* this entry is going to be removed and its message
			 * dropped, but we don't want to leak memory, so
		reply->msgs = user_msgs;
		memcpy(&user_msgs[reply->nb_received],
		reply->nb_received++;

		/* mark this request as processed */
		param->n_responses_processed++;
	} else if (sr->reply_received == -1) {
		/* we were asked to ignore this process */
	} else if (timeout) {
		/* count it as processed response, but don't increment
		param->n_responses_processed++;

	last_msg = param->n_responses_processed == reply->nb_sent;

	return last_msg ? ACTION_TRIGGER : ACTION_FREE;
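/*
 * Descriptive note: ACTION_TRIGGER is returned only once the final expected
 * response has been accounted for (received, ignored, or timed out), so the
 * user callback fires exactly once per async request, from whichever path
 * happens to process that last response.
 */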
trigger_async_action(struct pending_request *sr)
	struct async_request_param *param;
	struct rte_mp_reply *reply;

	param = sr->async.param;
	reply = &param->user_reply;

	param->clb(sr->request, reply);

	free(sr->async.param->user_reply.msgs);
	free(sr->async.param);
static struct pending_request *
async_reply_handle_thread_unsafe(void *arg)
	struct pending_request *req = (struct pending_request *)arg;
	enum async_action action;
	struct timespec ts_now;

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Cannot get current time\n");

	ts_now.tv_nsec = now.tv_usec * 1000;
	ts_now.tv_sec = now.tv_sec;

	action = process_async_request(req, &ts_now);

	TAILQ_REMOVE(&pending_requests.requests, req, next);

	if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
		/* if we failed to cancel the alarm because it's already in
		 * progress, don't proceed because otherwise we will end up
		 * handling the same message twice.
		if (rte_errno == EINPROGRESS) {
			RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
		RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");

	if (action == ACTION_TRIGGER)
async_reply_handle(void *arg)
	struct pending_request *req;

	pthread_mutex_lock(&pending_requests.lock);
	req = async_reply_handle_thread_unsafe(arg);
	pthread_mutex_unlock(&pending_requests.lock);

		trigger_async_action(req);
	struct sockaddr_un un;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		snprintf(peer_name, sizeof(peer_name),
				"%d_%"PRIx64, getpid(), rte_rdtsc());

	mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
		RTE_LOG(ERR, EAL, "failed to create unix socket\n");

	memset(&un, 0, sizeof(un));
	un.sun_family = AF_UNIX;

	create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));

	unlink(un.sun_path); /* may still exist from a previous run */

	if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
		RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
			un.sun_path, strerror(errno));

	RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
close_socket_fd(void)
	create_socket_path(peer_name, path, sizeof(path));
rte_mp_channel_init(void)
	pthread_t mp_handle_tid;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	/* in no-shared-files mode, secondary processes are not supported,
	 * so there is no need to initialize IPC.
	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");

	/* create filter path */
	create_socket_path("*", path, sizeof(path));
	strlcpy(mp_filter, basename(path), sizeof(mp_filter));

	/* path may have been modified, so recreate it */
	create_socket_path("*", path, sizeof(path));
	strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));

	/* lock the directory */
	dir_fd = open(mp_dir_path, O_RDONLY);
		RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
			mp_dir_path, strerror(errno));

	if (flock(dir_fd, LOCK_EX)) {
		RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
			mp_dir_path, strerror(errno));

	if (open_socket_fd() < 0) {

	if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
			NULL, mp_handle, NULL) < 0) {
		RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",

	/* unlock the directory */
	flock(dir_fd, LOCK_UN);
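/*
 * Descriptive note (assuming the default socket naming shown earlier):
 * mp_filter ends up as a glob such as "mp_socket_*", matching every
 * secondary process socket in mp_dir_path, while the exclusive flock() on
 * the directory keeps secondaries from starting up mid-initialization.
 */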
rte_mp_channel_cleanup(void)

 * Return -1 if the message failed to send due to an error on the local side.
 * Return 0 if the message failed to send due to the remote side.
 * Return 1 if the message was sent successfully.
send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
	struct cmsghdr *cmsg;
	struct sockaddr_un dst;
	struct mp_msg_internal m;
	int fd_size = msg->num_fds * sizeof(int);
	char control[CMSG_SPACE(fd_size)];

	memcpy(&m.msg, msg, sizeof(*msg));

	memset(&dst, 0, sizeof(dst));
	dst.sun_family = AF_UNIX;
	strlcpy(dst.sun_path, dst_path, sizeof(dst.sun_path));

	memset(&msgh, 0, sizeof(msgh));
	memset(control, 0, sizeof(control));

	iov.iov_len = sizeof(m) - sizeof(msg->fds);

	msgh.msg_name = &dst;
	msgh.msg_namelen = sizeof(dst);
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msgh);
	cmsg->cmsg_len = CMSG_LEN(fd_size);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);

		snd = sendmsg(mp_fd, &msgh, 0);
	} while (snd < 0 && errno == EINTR);

		/* check if it was caused by the peer process exiting */
		if (errno == ECONNREFUSED &&
				rte_eal_process_type() == RTE_PROC_PRIMARY) {
		RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
			dst_path, strerror(errno));
mp_send(struct rte_mp_msg *msg, const char *peer, int type)
	if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
		peer = eal_mp_socket_path();

		if (send_msg(peer, msg, type) < 0)

	/* broadcast to all secondary processes */
	mp_dir = opendir(mp_dir_path);
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n",

	dir_fd = dirfd(mp_dir);
	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",

	while ((ent = readdir(mp_dir))) {
		if (fnmatch(mp_filter, ent->d_name, 0) != 0)

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
		if (send_msg(path, msg, type) < 0)

	flock(dir_fd, LOCK_UN);

	/* dir_fd automatically closed on closedir */
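/*
 * Descriptive note: with an explicit peer, mp_send() is a unicast to that
 * socket; with peer == NULL in the primary process it walks mp_dir_path and
 * sends one copy to every socket matching mp_filter, i.e. to every live
 * secondary process.
 */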
check_input(const struct rte_mp_msg *msg)
		RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");

	if (validate_action_name(msg->name) != 0)

	if (msg->len_param < 0) {
		RTE_LOG(ERR, EAL, "Message data length is negative\n");

	if (msg->num_fds < 0) {
		RTE_LOG(ERR, EAL, "Number of fds is negative\n");

	if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
		RTE_LOG(ERR, EAL, "Message data is too long\n");

	if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
		RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
rte_mp_sendmsg(struct rte_mp_msg *msg)
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (check_input(msg) != 0)

	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");

	RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
	return mp_send(msg, NULL, MP_MSG);
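/*
 * Usage sketch (the action name and payload below are hypothetical): a
 * fire-and-forget notification carrying a small parameter blob and no fds.
 *
 *	struct rte_mp_msg notify;
 *	uint32_t value = 42;
 *
 *	memset(&notify, 0, sizeof(notify));
 *	strlcpy(notify.name, "my_notify", sizeof(notify.name));
 *	memcpy(notify.param, &value, sizeof(value));
 *	notify.len_param = sizeof(value);
 *	notify.num_fds = 0;
 *
 *	if (rte_mp_sendmsg(&notify) < 0)
 *		RTE_LOG(ERR, EAL, "notification failed: %s\n",
 *			rte_strerror(rte_errno));
 */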
mp_request_async(const char *dst, struct rte_mp_msg *req,
		struct async_request_param *param, const struct timespec *ts)
	struct rte_mp_msg *reply_msg;
	struct pending_request *pending_req, *exist;

	pending_req = calloc(1, sizeof(*pending_req));
	reply_msg = calloc(1, sizeof(*reply_msg));
	if (pending_req == NULL || reply_msg == NULL) {
		RTE_LOG(ERR, EAL, "Could not allocate space for async request\n");

	pending_req->type = REQUEST_TYPE_ASYNC;
	strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
	pending_req->request = req;
	pending_req->reply = reply_msg;
	pending_req->async.param = param;

	/* queue already locked by caller */

	exist = find_pending_request(dst, req->name);
		RTE_LOG(ERR, EAL, "Request %s:%s is already pending\n", dst, req->name);

	ret = send_msg(dst, req, MP_REQ);
		RTE_LOG(ERR, EAL, "Failed to send request %s:%s\n",
	} else if (ret == 0) {

	param->user_reply.nb_sent++;

	/* if alarm set fails, we simply ignore the reply */
	if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
			async_reply_handle, pending_req) < 0) {
		RTE_LOG(ERR, EAL, "Failed to set alarm for request %s:%s\n",

	TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);
mp_request_sync(const char *dst, struct rte_mp_msg *req,
		struct rte_mp_reply *reply, const struct timespec *ts)
	struct rte_mp_msg msg, *tmp;
	struct pending_request pending_req, *exist;

	pending_req.type = REQUEST_TYPE_SYNC;
	pending_req.reply_received = 0;
	strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
	pending_req.request = req;
	pending_req.reply = &msg;
	pthread_cond_init(&pending_req.sync.cond, NULL);

	exist = find_pending_request(dst, req->name);
		RTE_LOG(ERR, EAL, "Request %s:%s is already pending\n", dst, req->name);

	ret = send_msg(dst, req, MP_REQ);
		RTE_LOG(ERR, EAL, "Failed to send request %s:%s\n",

	TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);

		ret = pthread_cond_timedwait(&pending_req.sync.cond,
				&pending_requests.lock, ts);
	} while (ret != 0 && ret != ETIMEDOUT);

	TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);

	if (pending_req.reply_received == 0) {
		RTE_LOG(ERR, EAL, "Failed to recv reply for request %s:%s\n",
		rte_errno = ETIMEDOUT;

	if (pending_req.reply_received == -1) {
		RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
		/* not receiving this message is not an error, so decrement
		 * number of sent messages

	tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
		RTE_LOG(ERR, EAL, "Failed to alloc reply for request %s:%s\n",

	memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
	reply->nb_received++;
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
		const struct timespec *ts)
	int dir_fd, ret = -1;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

	reply->nb_received = 0;

	if (check_input(req) != 0)

	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Failed to get current time\n");

	end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
	end.tv_sec = now.tv_sec + ts->tv_sec +
			(now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;

	/* for secondary process, send request to the primary process only */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		pthread_mutex_lock(&pending_requests.lock);
		ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
		pthread_mutex_unlock(&pending_requests.lock);

	/* for primary process, broadcast request and collect replies one by one */
	mp_dir = opendir(mp_dir_path);
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);

	dir_fd = dirfd(mp_dir);
	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",

	pthread_mutex_lock(&pending_requests.lock);
	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,

		/* unlocks the mutex while waiting for response,
		if (mp_request_sync(path, req, reply, &end))

	pthread_mutex_unlock(&pending_requests.lock);
	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd automatically closed on closedir */

	reply->nb_received = 0;
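/*
 * Usage sketch (the request name, timeout, and the handle_one_reply() helper
 * are hypothetical): a blocking request with a 5 second timeout; the caller
 * owns and must free reply.msgs once the responses have been consumed.
 *
 *	struct rte_mp_msg req;
 *	struct rte_mp_reply reply;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int i;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&reply, 0, sizeof(reply));
 *	strlcpy(req.name, "my_query", sizeof(req.name));
 *
 *	if (rte_mp_request_sync(&req, &reply, &ts) == 0) {
 *		for (i = 0; i < reply.nb_received; i++)
 *			handle_one_reply(&reply.msgs[i]);
 *		free(reply.msgs);
 *	}
 */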
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
		rte_mp_async_reply_t clb)
	struct rte_mp_msg *copy;
	struct pending_request *dummy;
	struct async_request_param *param;
	struct rte_mp_reply *reply;
	int dir_fd, ret = 0;
	struct timespec *end;
	bool dummy_used = false;
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

	if (check_input(req) != 0)

	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		rte_errno = ENOTSUP;

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Failed to get current time\n");

	copy = calloc(1, sizeof(*copy));
	dummy = calloc(1, sizeof(*dummy));
	param = calloc(1, sizeof(*param));
	if (copy == NULL || dummy == NULL || param == NULL) {
		RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");

	memcpy(copy, req, sizeof(*copy));

	param->n_responses_processed = 0;
	reply = &param->user_reply;

	end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
	end->tv_sec = now.tv_sec + ts->tv_sec +
			(now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
	reply->nb_received = 0;

	/* we have to lock the request queue here, as we will be adding a bunch
	 * of requests to the queue at once, and some of the replies may arrive
	 * before we add all of the requests to the queue.
	pthread_mutex_lock(&pending_requests.lock);

	/* we have to ensure that the callback gets triggered even if we don't
	 * send anything, which is why a dummy request was allocated earlier.
	 * Fill it, and put it on the queue if we don't send any requests.
	dummy->type = REQUEST_TYPE_ASYNC;
	dummy->request = copy;
	dummy->reply = NULL;
	dummy->async.param = param;
	dummy->reply_received = 1; /* short-circuit the timeout */

	/* for secondary process, send request to the primary process only */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);

		/* if we didn't send anything, put dummy request on the queue */
		if (ret == 0 && reply->nb_sent == 0) {
			TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,

		pthread_mutex_unlock(&pending_requests.lock);

		/* if we couldn't send anything, clean up */

	/* for primary process, broadcast request */
	mp_dir = opendir(mp_dir_path);
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);

	dir_fd = dirfd(mp_dir);

	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",

	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,

		if (mp_request_async(path, copy, param, ts))

	/* if we didn't send anything, put dummy request on the queue */
	if (ret == 0 && reply->nb_sent == 0) {
		TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);

	/* finally, unlock the queue */
	pthread_mutex_unlock(&pending_requests.lock);

	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd automatically closed on closedir */

	/* if dummy was unused, free it */

	pthread_mutex_unlock(&pending_requests.lock);
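/*
 * Usage sketch (the callback and request names are hypothetical): the
 * callback runs later, either from the IPC handler thread or from the alarm
 * thread, once all responses have arrived or the timeout has expired.
 *
 *	static int
 *	my_reply_cb(const struct rte_mp_msg *request,
 *		const struct rte_mp_reply *reply)
 *	{
 *		RTE_LOG(INFO, EAL, "%s: %d of %d replies received\n",
 *			request->name, reply->nb_received, reply->nb_sent);
 *		return 0;
 *	}
 *
 *	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *	if (rte_mp_request_async(&req, &ts, my_reply_cb) < 0)
 *		RTE_LOG(ERR, EAL, "async request failed\n");
 */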
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
	RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
	const struct internal_config *internal_conf =
		eal_get_internal_configuration();

	if (check_input(msg) != 0)

		RTE_LOG(ERR, EAL, "peer is not specified\n");

	if (internal_conf->no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");

	return mp_send(msg, peer, MP_REP);
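/*
 * Descriptive note: a reply is matched to its request on the requester side
 * by (sender socket path, message name), so msg->name passed here must be
 * identical to the name of the request being answered.
 */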