/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <inttypes.h>
#include <libgen.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_string_fns.h>
#include <rte_tailq.h>

#include "eal_private.h"
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"

static int mp_fd = -1;
static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
static char peer_name[PATH_MAX];

struct action_entry {
	TAILQ_ENTRY(action_entry) next;
	char action_name[RTE_MP_MAX_NAME_LEN];
	rte_mp_t action;
};

/** Doubly-linked list of actions. */
TAILQ_HEAD(action_entry_list, action_entry);

static struct action_entry_list action_entry_list =
	TAILQ_HEAD_INITIALIZER(action_entry_list);

enum mp_type {
	MP_MSG, /* Share message with peers, will not block */
	MP_REQ, /* Request for information, will block for a reply */
	MP_REP, /* Response to previously-received request */
	MP_IGN, /* Response telling requester to ignore this response */
};
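
/*
 * Every datagram on the wire is a struct mp_msg_internal: one of the types
 * above followed by the user-visible struct rte_mp_msg. MP_MSG is
 * fire-and-forget; MP_REQ expects each addressed peer to answer with either
 * MP_REP (an actual reply) or MP_IGN (the peer asks to be ignored, e.g.
 * because it is not yet ready to process requests).
 */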

struct mp_msg_internal {
	int type;
	struct rte_mp_msg msg;
};

struct async_request_param {
	rte_mp_async_reply_t clb;
	struct rte_mp_reply user_reply;
	struct timespec end;
	int n_responses_processed;
};

struct pending_request {
	TAILQ_ENTRY(pending_request) next;
	enum {
		REQUEST_TYPE_SYNC,
		REQUEST_TYPE_ASYNC
	} type;
	char dst[PATH_MAX];
	struct rte_mp_msg *request;
	struct rte_mp_msg *reply;
	int reply_received;
	RTE_STD_C11
	union {
		struct {
			struct async_request_param *param;
		} async;
		struct {
			pthread_cond_t cond;
		} sync;
	};
};

TAILQ_HEAD(pending_request_list, pending_request);

static struct {
	struct pending_request_list requests;
	pthread_mutex_t lock;
} pending_requests = {
	.requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
	.lock = PTHREAD_MUTEX_INITIALIZER,
	/**< used in async requests only */
};

/* forward declarations */
static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type);

/* for use with alarm callback */
static void
async_reply_handle(void *arg);

/* for use with process_msg */
static struct pending_request *
async_reply_handle_thread_unsafe(void *arg);

static void
trigger_async_action(struct pending_request *req);

static struct pending_request *
find_pending_request(const char *dst, const char *act_name)
{
	struct pending_request *r;

	TAILQ_FOREACH(r, &pending_requests.requests, next) {
		if (!strcmp(r->dst, dst) &&
		    !strcmp(r->request->name, act_name))
			break;
	}

	return r;
}

static void
create_socket_path(const char *name, char *buf, int len)
{
	const char *prefix = eal_mp_socket_path();

	if (strlen(name) > 0)
		snprintf(buf, len, "%s_%s", prefix, name);
	else
		strlcpy(buf, prefix, len);
}

int
rte_eal_primary_proc_alive(const char *config_file_path)
{
	int config_fd;

	if (config_file_path)
		config_fd = open(config_file_path, O_RDONLY);
	else {
		const char *path;

		path = eal_runtime_config_path();
		config_fd = open(path, O_RDONLY);
	}
	if (config_fd < 0)
		return 0;

	int ret = lockf(config_fd, F_TEST, 0);
	close(config_fd);

	return !!ret;
}
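
/*
 * Usage sketch (illustrative; handle_primary_exit() is hypothetical): a
 * secondary process can poll primary liveness, passing NULL to test the
 * default runtime config file.
 *
 *	while (rte_eal_primary_proc_alive(NULL))
 *		sleep(1);
 *	handle_primary_exit();
 */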

static struct action_entry *
find_action_entry_by_name(const char *name)
{
	struct action_entry *entry;

	TAILQ_FOREACH(entry, &action_entry_list, next) {
		if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
			break;
	}

	return entry;
}

static int
validate_action_name(const char *name)
{
	if (name == NULL) {
		RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
		RTE_LOG(ERR, EAL, "Length of action name is zero\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
		RTE_LOG(ERR, EAL, "Action name is too long\n");
		rte_errno = E2BIG;
		return -1;
	}
	return 0;
}

int __rte_experimental
rte_mp_action_register(const char *name, rte_mp_t action)
{
	struct action_entry *entry;

	if (validate_action_name(name) != 0)
		return -1;

	entry = malloc(sizeof(struct action_entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		return -1;
	}
	strlcpy(entry->action_name, name, sizeof(entry->action_name));
	entry->action = action;

	pthread_mutex_lock(&mp_mutex_action);
	if (find_action_entry_by_name(name) != NULL) {
		pthread_mutex_unlock(&mp_mutex_action);
		rte_errno = EEXIST;
		free(entry);
		return -1;
	}
	TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
	pthread_mutex_unlock(&mp_mutex_action);
	return 0;
}
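
/*
 * Usage sketch (illustrative only; the "app_echo" action name and
 * echo_handler() are hypothetical): a registered handler is called for each
 * incoming message carrying its action name, and typically answers with
 * rte_mp_reply(). Note the reply must reuse the request's name so the
 * requester can match it to its pending request.
 *
 *	static int
 *	echo_handler(const struct rte_mp_msg *msg, const void *peer)
 *	{
 *		struct rte_mp_msg reply;
 *
 *		memset(&reply, 0, sizeof(reply));
 *		strlcpy(reply.name, msg->name, sizeof(reply.name));
 *		return rte_mp_reply(&reply, peer);
 *	}
 *
 *	if (rte_mp_action_register("app_echo", echo_handler) < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register IPC action\n");
 */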

void __rte_experimental
rte_mp_action_unregister(const char *name)
{
	struct action_entry *entry;

	if (validate_action_name(name) != 0)
		return;

	pthread_mutex_lock(&mp_mutex_action);
	entry = find_action_entry_by_name(name);
	if (entry == NULL) {
		pthread_mutex_unlock(&mp_mutex_action);
		return;
	}
	TAILQ_REMOVE(&action_entry_list, entry, next);
	pthread_mutex_unlock(&mp_mutex_action);
	free(entry);
}

static int
read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
	int msglen;
	struct iovec iov;
	struct msghdr msgh;
	char control[CMSG_SPACE(sizeof(m->msg.fds))];
	struct cmsghdr *cmsg;
	int buflen = sizeof(*m) - sizeof(m->msg.fds);
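
	/* the fds are transferred separately as SCM_RIGHTS ancillary data, so
	 * the iovec payload deliberately excludes the trailing fd table,
	 * hence buflen above.
	 */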

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = m;
	iov.iov_len  = buflen;

	msgh.msg_name = s;
	msgh.msg_namelen = sizeof(*s);
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	msglen = recvmsg(mp_fd, &msgh, 0);
	if (msglen < 0) {
		RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
		return -1;
	}

	if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		RTE_LOG(ERR, EAL, "truncated msg\n");
		return -1;
	}

	/* read auxiliary FDs if any */
	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));
			break;
		}
	}
	/* sanity-check the response */
	if (m->msg.num_fds < 0 || m->msg.num_fds > RTE_MP_MAX_FD_NUM) {
		RTE_LOG(ERR, EAL, "invalid number of fd's received\n");
		return -1;
	}
	if (m->msg.len_param < 0 || m->msg.len_param > RTE_MP_MAX_PARAM_LEN) {
		RTE_LOG(ERR, EAL, "invalid received data length\n");
		return -1;
	}
	return 0;
}

static void
process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
	struct pending_request *pending_req;
	struct action_entry *entry;
	struct rte_mp_msg *msg = &m->msg;
	rte_mp_t action = NULL;

	RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);

	if (m->type == MP_REP || m->type == MP_IGN) {
		struct pending_request *req = NULL;

		pthread_mutex_lock(&pending_requests.lock);
		pending_req = find_pending_request(s->sun_path, msg->name);
		if (pending_req) {
			memcpy(pending_req->reply, msg, sizeof(*msg));
			/* -1 indicates that we've been asked to ignore */
			pending_req->reply_received =
				m->type == MP_REP ? 1 : -1;

			if (pending_req->type == REQUEST_TYPE_SYNC)
				pthread_cond_signal(&pending_req->sync.cond);
			else if (pending_req->type == REQUEST_TYPE_ASYNC)
				req = async_reply_handle_thread_unsafe(
						pending_req);
		} else
			RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
		pthread_mutex_unlock(&pending_requests.lock);

		if (req != NULL)
			trigger_async_action(req);
		return;
	}

	pthread_mutex_lock(&mp_mutex_action);
	entry = find_action_entry_by_name(msg->name);
	if (entry != NULL)
		action = entry->action;
	pthread_mutex_unlock(&mp_mutex_action);

	if (!action) {
		if (m->type == MP_REQ && !internal_config.init_complete) {
			/* if this is a request, and init is not yet complete,
			 * and callback wasn't registered, we should tell the
			 * requester to ignore our existence because we're not
			 * yet ready to process this request.
			 */
			struct rte_mp_msg dummy;

			memset(&dummy, 0, sizeof(dummy));
			strlcpy(dummy.name, msg->name, sizeof(dummy.name));
			mp_send(&dummy, s->sun_path, MP_IGN);
		} else
			RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
				msg->name);
	} else if (action(msg, s->sun_path) < 0) {
		RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
	}
}
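
/* main loop of the IPC control thread: read_msg() blocks in recvmsg() on
 * mp_fd, so this thread spends its life receiving datagrams and dispatching
 * each one in-line through process_msg().
 */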
static void *
mp_handle(void *arg __rte_unused)
{
	struct mp_msg_internal msg;
	struct sockaddr_un sa;

	while (1) {
		if (read_msg(&msg, &sa) == 0)
			process_msg(&msg, &sa);
	}

	return NULL;
}

static int
timespec_cmp(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec < b->tv_sec)
		return -1;
	if (a->tv_sec > b->tv_sec)
		return 1;
	if (a->tv_nsec < b->tv_nsec)
		return -1;
	if (a->tv_nsec > b->tv_nsec)
		return 1;
	return 0;
}

enum async_action {
	ACTION_FREE, /**< free the action entry, but don't trigger callback */
	ACTION_TRIGGER /**< trigger callback, then free action entry */
};

static enum async_action
process_async_request(struct pending_request *sr, const struct timespec *now)
{
	struct async_request_param *param;
	struct rte_mp_reply *reply;
	bool timeout, last_msg;

	param = sr->async.param;
	reply = &param->user_reply;

	/* did we time out? */
	timeout = timespec_cmp(&param->end, now) <= 0;

	/* if we received a response, adjust relevant data and copy message. */
	if (sr->reply_received == 1 && sr->reply) {
		struct rte_mp_msg *msg, *user_msgs, *tmp;

		msg = sr->reply;
		user_msgs = reply->msgs;

		tmp = realloc(user_msgs, sizeof(*msg) *
				(reply->nb_received + 1));
		if (!tmp) {
			RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
				sr->dst, sr->request->name);
			/* this entry is going to be removed and its message
			 * dropped, but we don't want to leak memory, so
			 * the response is simply discarded.
			 */
		} else {
			user_msgs = tmp;
			reply->msgs = user_msgs;
			memcpy(&user_msgs[reply->nb_received],
					msg, sizeof(*msg));
			reply->nb_received++;
		}

		/* mark this request as processed */
		param->n_responses_processed++;
	} else if (sr->reply_received == -1) {
		/* we were asked to ignore this process */
		reply->nb_sent--;
	} else if (timeout) {
		/* count it as a processed response, but don't increment
		 * nb_received.
		 */
		param->n_responses_processed++;
	}

	free(sr->reply);

	last_msg = param->n_responses_processed == reply->nb_sent;

	return last_msg ? ACTION_TRIGGER : ACTION_FREE;
}

static void
trigger_async_action(struct pending_request *sr)
{
	struct async_request_param *param;
	struct rte_mp_reply *reply;

	param = sr->async.param;
	reply = &param->user_reply;

	param->clb(sr->request, reply);

	/* clean up */
	free(sr->async.param->user_reply.msgs);
	free(sr->async.param);
	free(sr->request);
	free(sr);
}

static struct pending_request *
async_reply_handle_thread_unsafe(void *arg)
{
	struct pending_request *req = (struct pending_request *)arg;
	enum async_action action;
	struct timespec ts_now;
	struct timeval now;

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Cannot get current time\n");
		goto no_trigger;
	}
	ts_now.tv_nsec = now.tv_usec * 1000;
	ts_now.tv_sec = now.tv_sec;

	action = process_async_request(req, &ts_now);

	TAILQ_REMOVE(&pending_requests.requests, req, next);

	if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
		/* if we failed to cancel the alarm because it's already in
		 * progress, don't proceed because otherwise we will end up
		 * handling the same message twice.
		 */
		if (rte_errno == EINPROGRESS) {
			RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
			goto no_trigger;
		}
		RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");
	}

	if (action == ACTION_TRIGGER)
		return req;
no_trigger:
	free(req);
	return NULL;
}

static void
async_reply_handle(void *arg)
{
	struct pending_request *req;

	pthread_mutex_lock(&pending_requests.lock);
	req = async_reply_handle_thread_unsafe(arg);
	pthread_mutex_unlock(&pending_requests.lock);

	if (req != NULL)
		trigger_async_action(req);
}
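
/* Note: the async callback is triggered only after pending_requests.lock has
 * been released, so a user callback can safely issue new IPC requests
 * without deadlocking on the queue lock.
 */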

static int
open_socket_fd(void)
{
	struct sockaddr_un un;

	peer_name[0] = '\0';
	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		snprintf(peer_name, sizeof(peer_name),
				"%d_%"PRIx64, getpid(), rte_rdtsc());

	mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (mp_fd < 0) {
		RTE_LOG(ERR, EAL, "failed to create unix socket\n");
		return -1;
	}

	memset(&un, 0, sizeof(un));
	un.sun_family = AF_UNIX;

	create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));

	unlink(un.sun_path); /* May still exist since last run */

	if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
		RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
			un.sun_path, strerror(errno));
		close(mp_fd);
		return -1;
	}

	RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
	return mp_fd;
}
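
/* Each secondary process binds a uniquely named socket (pid plus rdtsc
 * value) in the same runtime directory as the primary's socket; this is
 * what the mp_filter wildcard pattern matches when broadcasting.
 */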
static void
close_socket_fd(void)
{
	char path[PATH_MAX];

	if (mp_fd < 0)
		return;

	close(mp_fd);
	create_socket_path(peer_name, path, sizeof(path));
	unlink(path);
}

int
rte_mp_channel_init(void)
{
	char path[PATH_MAX];
	int dir_fd;
	pthread_t mp_handle_tid;

	/* in no shared files mode, we do not have secondary processes support,
	 * so no need to initialize IPC.
	 */
	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
		return 0;
	}

	/* create filter path */
	create_socket_path("*", path, sizeof(path));
	strlcpy(mp_filter, basename(path), sizeof(mp_filter));

	/* path may have been modified, so recreate it */
	create_socket_path("*", path, sizeof(path));
	strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));

	/* lock the directory */
	dir_fd = open(mp_dir_path, O_RDONLY);
	if (dir_fd < 0) {
		RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
			mp_dir_path, strerror(errno));
		return -1;
	}

	if (flock(dir_fd, LOCK_EX)) {
		RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
			mp_dir_path, strerror(errno));
		close(dir_fd);
		return -1;
	}

	if (open_socket_fd() < 0) {
		close(dir_fd);
		return -1;
	}

	if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
			NULL, mp_handle, NULL) < 0) {
		RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
			strerror(errno));
		close(mp_fd);
		close(dir_fd);
		mp_fd = -1;
		return -1;
	}

	/* unlock the directory */
	flock(dir_fd, LOCK_UN);
	close(dir_fd);

	return 0;
}

void
rte_mp_channel_cleanup(void)
{
	close_socket_fd();
}

/**
 * Return -1 if the message failed to send due to an error on the local side.
 * Return 0 if the message failed to send because the remote peer has exited.
 * Return 1 if the message was sent successfully.
 */
static int
send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
{
	int snd;
	struct iovec iov;
	struct msghdr msgh;
	struct cmsghdr *cmsg;
	struct sockaddr_un dst;
	struct mp_msg_internal m;
	int fd_size = msg->num_fds * sizeof(int);
	char control[CMSG_SPACE(fd_size)];

	m.type = type;
	memcpy(&m.msg, msg, sizeof(*msg));

	memset(&dst, 0, sizeof(dst));
	dst.sun_family = AF_UNIX;
	strlcpy(dst.sun_path, dst_path, sizeof(dst.sun_path));

	memset(&msgh, 0, sizeof(msgh));
	memset(control, 0, sizeof(control));

	iov.iov_base = &m;
	iov.iov_len = sizeof(m) - sizeof(msg->fds);

	msgh.msg_name = &dst;
	msgh.msg_namelen = sizeof(dst);
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msgh);
	cmsg->cmsg_len = CMSG_LEN(fd_size);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);

	do {
		snd = sendmsg(mp_fd, &msgh, 0);
	} while (snd < 0 && errno == EINTR);

	if (snd < 0) {
		rte_errno = errno;
		/* Check if it is caused by the peer process exiting */
		if (errno == ECONNREFUSED &&
				rte_eal_process_type() == RTE_PROC_PRIMARY) {
			unlink(dst_path);
			return 0;
		}
		RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
			dst_path, strerror(errno));
		return -1;
	}

	return 1;
}

static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type)
{
	int dir_fd, ret = 0;
	DIR *mp_dir;
	struct dirent *ent;

	if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
		peer = eal_mp_socket_path();

	if (peer) {
		if (send_msg(peer, msg, type) < 0)
			return -1;
		else
			return 0;
	}

	/* broadcast to all secondary processes */
	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
				mp_dir_path);
		rte_errno = errno;
		return -1;
	}

	dir_fd = dirfd(mp_dir);
	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
			mp_dir_path);
		rte_errno = errno;
		closedir(mp_dir);
		return -1;
	}

	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
			 ent->d_name);
		if (send_msg(path, msg, type) < 0)
			ret = -1;
	}
	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd is automatically closed on closedir */
	closedir(mp_dir);
	return ret;
}

static int
check_input(const struct rte_mp_msg *msg)
{
	if (msg == NULL) {
		RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
		rte_errno = EINVAL;
		return -1;
	}

	if (validate_action_name(msg->name) != 0)
		return -1;

	if (msg->len_param < 0) {
		RTE_LOG(ERR, EAL, "Message data length is negative\n");
		rte_errno = EINVAL;
		return -1;
	}

	if (msg->num_fds < 0) {
		RTE_LOG(ERR, EAL, "Number of fd's is negative\n");
		rte_errno = EINVAL;
		return -1;
	}

	if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
		RTE_LOG(ERR, EAL, "Message data is too long\n");
		rte_errno = E2BIG;
		return -1;
	}

	if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
		RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
			RTE_MP_MAX_FD_NUM);
		rte_errno = E2BIG;
		return -1;
	}

	return 0;
}

int __rte_experimental
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
	if (check_input(msg) != 0)
		return -1;

	RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
	return mp_send(msg, NULL, MP_MSG);
}
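
/*
 * Usage sketch (illustrative; the "app_status" action name is hypothetical).
 * rte_mp_sendmsg() is fire-and-forget: the datagram is handed to the peer
 * socket(s) and no reply is expected.
 *
 *	struct rte_mp_msg msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	strlcpy(msg.name, "app_status", sizeof(msg.name));
 *	msg.len_param = 0;
 *	msg.num_fds = 0;
 *	if (rte_mp_sendmsg(&msg) < 0)
 *		RTE_LOG(ERR, USER1, "status broadcast failed\n");
 */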
static int
mp_request_async(const char *dst, struct rte_mp_msg *req,
		struct async_request_param *param, const struct timespec *ts)
{
	struct rte_mp_msg *reply_msg;
	struct pending_request *pending_req, *exist;
	int ret;

	pending_req = calloc(1, sizeof(*pending_req));
	reply_msg = calloc(1, sizeof(*reply_msg));
	if (pending_req == NULL || reply_msg == NULL) {
		RTE_LOG(ERR, EAL, "Could not allocate space for sync request\n");
		rte_errno = ENOMEM;
		ret = -1;
		goto fail;
	}

	pending_req->type = REQUEST_TYPE_ASYNC;
	strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
	pending_req->request = req;
	pending_req->reply = reply_msg;
	pending_req->async.param = param;

	/* queue already locked by caller */

	exist = find_pending_request(dst, req->name);
	if (exist) {
		RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
		rte_errno = EEXIST;
		ret = -1;
		goto fail;
	}

	ret = send_msg(dst, req, MP_REQ);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
			dst, req->name);
		ret = -1;
		goto fail;
	} else if (ret == 0) {
		ret = 0;
		goto fail;
	}
	param->user_reply.nb_sent++;

	/* if alarm set fails, we simply ignore the reply */
	if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
			      async_reply_handle, pending_req) < 0) {
		RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
			dst, req->name);
		ret = -1;
		goto fail;
	}
	TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);

	return 0;
fail:
	free(pending_req);
	free(reply_msg);
	return ret;
}

static int
mp_request_sync(const char *dst, struct rte_mp_msg *req,
		struct rte_mp_reply *reply, const struct timespec *ts)
{
	int ret;
	struct rte_mp_msg msg, *tmp;
	struct pending_request pending_req, *exist;

	pending_req.type = REQUEST_TYPE_SYNC;
	pending_req.reply_received = 0;
	strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
	pending_req.request = req;
	pending_req.reply = &msg;
	pthread_cond_init(&pending_req.sync.cond, NULL);

	exist = find_pending_request(dst, req->name);
	if (exist) {
		RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
		rte_errno = EEXIST;
		return -1;
	}

	ret = send_msg(dst, req, MP_REQ);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
			dst, req->name);
		return -1;
	} else if (ret == 0)
		return 0;

	TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);

	reply->nb_sent++;

	do {
		ret = pthread_cond_timedwait(&pending_req.sync.cond,
				&pending_requests.lock, ts);
	} while (ret != 0 && ret != ETIMEDOUT);

	TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);

	if (pending_req.reply_received == 0) {
		RTE_LOG(ERR, EAL, "Fail to recv reply for request %s:%s\n",
			dst, req->name);
		rte_errno = ETIMEDOUT;
		return -1;
	}
	if (pending_req.reply_received == -1) {
		RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
		/* not receiving this message is not an error, so decrement
		 * number of sent messages
		 */
		reply->nb_sent--;
		return 0;
	}

	tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
	if (!tmp) {
		RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
			dst, req->name);
		rte_errno = ENOMEM;
		return -1;
	}
	memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
	reply->msgs = tmp;
	reply->nb_received++;
	return 0;
}
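
/* Note: pthread_cond_timedwait() atomically releases pending_requests.lock
 * while waiting and re-acquires it before returning; this is what allows the
 * IPC thread to deliver the reply and signal the condition variable even
 * though mp_request_sync() is called with the lock held.
 */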

int __rte_experimental
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
		const struct timespec *ts)
{
	int dir_fd, ret = 0;
	DIR *mp_dir;
	struct dirent *ent;
	struct timeval now;
	struct timespec end;

	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

	reply->nb_sent = 0;
	reply->nb_received = 0;
	reply->msgs = NULL;

	if (check_input(req) != 0)
		goto err;

	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		rte_errno = ENOTSUP;
		return -1;
	}

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Failed to get current time\n");
		rte_errno = errno;
		goto err;
	}

	end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
	end.tv_sec = now.tv_sec + ts->tv_sec +
			(now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;

	/* for secondary process, send request to the primary process only */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		pthread_mutex_lock(&pending_requests.lock);
		ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
		pthread_mutex_unlock(&pending_requests.lock);
		if (ret)
			goto err;
		return ret;
	}

	/* for primary process, broadcast request, and collect reply 1 by 1 */
	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
		rte_errno = errno;
		goto err;
	}

	dir_fd = dirfd(mp_dir);
	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
			mp_dir_path);
		rte_errno = errno;
		goto close_end;
	}

	pthread_mutex_lock(&pending_requests.lock);
	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
			 ent->d_name);

		/* unlocks the mutex while waiting for response,
		 * locks it again on return.
		 */
		if (mp_request_sync(path, req, reply, &end))
			goto unlock_end;
	}
	pthread_mutex_unlock(&pending_requests.lock);
	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd is automatically closed on closedir */
	closedir(mp_dir);
	return ret;

unlock_end:
	pthread_mutex_unlock(&pending_requests.lock);
	flock(dir_fd, LOCK_UN);
close_end:
	closedir(mp_dir);
err:
	free(reply->msgs);
	reply->nb_sent = 0;
	reply->nb_received = 0;
	reply->msgs = NULL;
	return -1;
}
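
/*
 * Usage sketch (illustrative; the "app_query" action name and use_reply()
 * are hypothetical). On success the caller owns reply.msgs and must free()
 * it:
 *
 *	struct rte_mp_msg req;
 *	struct rte_mp_reply reply;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int i;
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.name, "app_query", sizeof(req.name));
 *	if (rte_mp_request_sync(&req, &reply, &ts) == 0) {
 *		for (i = 0; i < reply.nb_received; i++)
 *			use_reply(&reply.msgs[i]);
 *		free(reply.msgs);
 *	}
 */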

int __rte_experimental
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
		rte_mp_async_reply_t clb)
{
	struct rte_mp_msg *copy;
	struct pending_request *dummy;
	struct async_request_param *param;
	struct rte_mp_reply *reply;
	int dir_fd, ret = 0;
	DIR *mp_dir;
	struct dirent *ent;
	struct timeval now;
	struct timespec *end;
	bool dummy_used = false;

	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

	if (check_input(req) != 0)
		return -1;

	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		rte_errno = ENOTSUP;
		return -1;
	}

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Failed to get current time\n");
		rte_errno = errno;
		return -1;
	}
	copy = calloc(1, sizeof(*copy));
	dummy = calloc(1, sizeof(*dummy));
	param = calloc(1, sizeof(*param));
	if (copy == NULL || dummy == NULL || param == NULL) {
		RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* copy message */
	memcpy(copy, req, sizeof(*copy));

	param->n_responses_processed = 0;
	param->clb = clb;
	end = &param->end;
	reply = &param->user_reply;

	end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
	end->tv_sec = now.tv_sec + ts->tv_sec +
			(now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;

	reply->nb_sent = 0;
	reply->nb_received = 0;
	reply->msgs = NULL;

	/* we have to lock the request queue here, as we will be adding a bunch
	 * of requests to the queue at once, and some of the replies may arrive
	 * before we add all of the requests to the queue.
	 */
	pthread_mutex_lock(&pending_requests.lock);

	/* we have to ensure that the callback gets triggered even if we don't
	 * send anything, therefore earlier we have allocated a dummy request.
	 * fill it, and put it on the queue if we don't send any requests.
	 */
	dummy->type = REQUEST_TYPE_ASYNC;
	dummy->request = copy;
	dummy->reply = NULL;
	dummy->async.param = param;
	dummy->reply_received = 1; /* short-circuit the timeout */

	/* for secondary process, send request to the primary process only */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);

		/* if we didn't send anything, put dummy request on the queue */
		if (ret == 0 && reply->nb_sent == 0) {
			TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
					next);
			dummy_used = true;
		}

		pthread_mutex_unlock(&pending_requests.lock);

		/* if we couldn't send anything, clean up */
		if (ret != 0)
			goto fail;
		return 0;
	}

	/* for primary process, broadcast request */
	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
		rte_errno = errno;
		goto unlock_fail;
	}
	dir_fd = dirfd(mp_dir);

	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
			mp_dir_path);
		rte_errno = errno;
		goto closedir_fail;
	}

	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
			 ent->d_name);

		if (mp_request_async(path, copy, param, ts))
			ret = -1;
	}
	/* if we didn't send anything, put dummy request on the queue */
	if (ret == 0 && reply->nb_sent == 0) {
		TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
		dummy_used = true;
	}

	/* finally, unlock the queue */
	pthread_mutex_unlock(&pending_requests.lock);

	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd is automatically closed on closedir */
	closedir(mp_dir);

	/* if dummy was unused, free it */
	if (!dummy_used)
		free(dummy);

	return ret;
closedir_fail:
	closedir(mp_dir);
unlock_fail:
	pthread_mutex_unlock(&pending_requests.lock);
fail:
	free(dummy);
	free(param);
	free(copy);
	return -1;
}
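
/*
 * Usage sketch (illustrative; reply_cb() and the "app_query" action name are
 * hypothetical). The callback fires later, from the IPC/alarm context, once
 * all peers have replied or the timeout has expired:
 *
 *	static int
 *	reply_cb(const struct rte_mp_msg *request,
 *			const struct rte_mp_reply *reply)
 *	{
 *		RTE_LOG(INFO, USER1, "%s: got %d of %d replies\n",
 *			request->name, reply->nb_received, reply->nb_sent);
 *		return 0;
 *	}
 *
 *	struct rte_mp_msg req;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	memset(&req, 0, sizeof(req));
 *	strlcpy(req.name, "app_query", sizeof(req.name));
 *	rte_mp_request_async(&req, &ts, reply_cb);
 */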

int __rte_experimental
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
	RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);

	if (check_input(msg) != 0)
		return -1;

	if (peer == NULL) {
		RTE_LOG(ERR, EAL, "peer is not specified\n");
		rte_errno = EINVAL;
		return -1;
	}

	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		return 0;
	}

	return mp_send(msg, peer, MP_REP);
}