/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2018 Intel Corporation
 */

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fnmatch.h>
#include <inttypes.h>
#include <libgen.h>
#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/file.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

#include <rte_alarm.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_tailq.h>

#include "eal_private.h"
#include "eal_filesystem.h"
#include "eal_internal_cfg.h"

static int mp_fd = -1;
static char mp_filter[PATH_MAX];   /* Filter for secondary process sockets */
static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;

struct action_entry {
	TAILQ_ENTRY(action_entry) next;
	char action_name[RTE_MP_MAX_NAME_LEN];
	rte_mp_t action;
};

/** Doubly-linked list of registered actions. */
TAILQ_HEAD(action_entry_list, action_entry);

static struct action_entry_list action_entry_list =
	TAILQ_HEAD_INITIALIZER(action_entry_list);

enum mp_type {
	MP_MSG, /* Share message with peers, will not block */
	MP_REQ, /* Request for information, will block for a reply */
	MP_REP, /* Response to previously-received request */
	MP_IGN, /* Response telling requester to ignore this response */
};

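/*
 * Message flow: MP_MSG is fire-and-forget; an MP_REQ expects exactly one
 * MP_REP (or MP_IGN) from each peer it was delivered to. MP_IGN is sent
 * by a peer that has not finished initializing and wants the requester
 * to treat it as if it did not exist.
 */
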
struct mp_msg_internal {
	int type;
	struct rte_mp_msg msg;
};

struct async_request_param {
	rte_mp_async_reply_t clb;
	struct rte_mp_reply user_reply;
	struct timespec end;
	int n_responses_processed;
};

struct pending_request {
	TAILQ_ENTRY(pending_request) next;
	enum {
		REQUEST_TYPE_SYNC,
		REQUEST_TYPE_ASYNC
	} type;
	char dst[PATH_MAX];
	struct rte_mp_msg *request;
	struct rte_mp_msg *reply;
	int reply_received;
	RTE_STD_C11
	union {
		struct {
			struct async_request_param *param;
		} async;
		struct {
			pthread_cond_t cond;
		} sync;
	};
};

TAILQ_HEAD(pending_request_list, pending_request);

static struct {
	struct pending_request_list requests;
	pthread_mutex_t lock;
} pending_requests = {
	.requests = TAILQ_HEAD_INITIALIZER(pending_requests.requests),
	.lock = PTHREAD_MUTEX_INITIALIZER,
	/**< used in async requests only */
};

/* forward declarations */
static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type);

/* for use with alarm callback */
static void
async_reply_handle(void *arg);

/* for use with process_msg */
static struct pending_request *
async_reply_handle_thread_unsafe(void *arg);

static void
trigger_async_action(struct pending_request *req);

static struct pending_request *
find_pending_request(const char *dst, const char *act_name)
{
	struct pending_request *r;

	TAILQ_FOREACH(r, &pending_requests.requests, next) {
		if (!strcmp(r->dst, dst) &&
		    !strcmp(r->request->name, act_name))
			break;
	}

	return r;
}

static void
create_socket_path(const char *name, char *buf, int len)
{
	const char *prefix = eal_mp_socket_path();

	if (strlen(name) > 0)
		snprintf(buf, len, "%s_%s", prefix, name);
	else
		strlcpy(buf, prefix, len);
}

int
rte_eal_primary_proc_alive(const char *config_file_path)
{
	int config_fd;

	if (config_file_path)
		config_fd = open(config_file_path, O_RDONLY);
	else {
		const char *path;

		path = eal_runtime_config_path();
		config_fd = open(path, O_RDONLY);
	}
	if (config_fd < 0)
		return 0;

	int ret = lockf(config_fd, F_TEST, 0);
	close(config_fd);

	return !!ret;
}

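/*
 * The check above relies on the primary process holding a lockf() lock
 * on its runtime config file for its whole lifetime: if F_TEST reports
 * the file as locked, a primary is alive. A minimal usage sketch (a NULL
 * argument selects the default runtime config path):
 *
 *	if (rte_eal_primary_proc_alive(NULL))
 *		printf("primary process is running\n");
 */
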
static struct action_entry *
find_action_entry_by_name(const char *name)
{
	struct action_entry *entry;

	TAILQ_FOREACH(entry, &action_entry_list, next) {
		if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
			break;
	}

	return entry;
}

static int
validate_action_name(const char *name)
{
	if (name == NULL) {
		RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
		RTE_LOG(ERR, EAL, "Length of action name is zero\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
		rte_errno = E2BIG;
		return -1;
	}
	return 0;
}

int __rte_experimental
rte_mp_action_register(const char *name, rte_mp_t action)
{
	struct action_entry *entry;

	if (validate_action_name(name))
		return -1;

	entry = malloc(sizeof(struct action_entry));
	if (entry == NULL) {
		rte_errno = ENOMEM;
		return -1;
	}
	strlcpy(entry->action_name, name, sizeof(entry->action_name));
	entry->action = action;

	pthread_mutex_lock(&mp_mutex_action);
	if (find_action_entry_by_name(name) != NULL) {
		pthread_mutex_unlock(&mp_mutex_action);
		rte_errno = EEXIST;
		free(entry);
		return -1;
	}
	TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
	pthread_mutex_unlock(&mp_mutex_action);
	return 0;
}

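/*
 * Usage sketch for action (un)registration; the action name
 * "example_action" and the handler are hypothetical, not part of EAL
 * (see the reply sketch at the end of this file for a handler body):
 *
 *	if (rte_mp_action_register("example_action", example_handler) < 0)
 *		rte_panic("cannot register IPC action\n");
 *	...
 *	rte_mp_action_unregister("example_action");
 */
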
void __rte_experimental
rte_mp_action_unregister(const char *name)
{
	struct action_entry *entry;

	if (validate_action_name(name))
		return;

	pthread_mutex_lock(&mp_mutex_action);
	entry = find_action_entry_by_name(name);
	if (entry == NULL) {
		pthread_mutex_unlock(&mp_mutex_action);
		return;
	}
	TAILQ_REMOVE(&action_entry_list, entry, next);
	pthread_mutex_unlock(&mp_mutex_action);
	free(entry);
}

static int
read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
	int msglen;
	struct iovec iov;
	struct msghdr msgh;
	char control[CMSG_SPACE(sizeof(m->msg.fds))];
	struct cmsghdr *cmsg;
	int buflen = sizeof(*m) - sizeof(m->msg.fds);

	memset(&msgh, 0, sizeof(msgh));
	iov.iov_base = m;
	iov.iov_len  = buflen;

	msgh.msg_name = s;
	msgh.msg_namelen = sizeof(*s);
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	msglen = recvmsg(mp_fd, &msgh, 0);
	if (msglen < 0) {
		RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
		return -1;
	}

	if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		RTE_LOG(ERR, EAL, "truncated msg\n");
		return -1;
	}

	/* read auxiliary FDs if any */
	for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
		cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
		if ((cmsg->cmsg_level == SOL_SOCKET) &&
			(cmsg->cmsg_type == SCM_RIGHTS)) {
			memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));
			break;
		}
	}

	return 0;
}

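/*
 * Note on fd passing: descriptors attached to a message travel as
 * SCM_RIGHTS ancillary data, so the kernel installs duplicates in the
 * receiving process; the fds[] of the received message therefore hold
 * descriptors valid locally, not the sender's numbers.
 */
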
static void
process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
{
	struct pending_request *pending_req;
	struct action_entry *entry;
	struct rte_mp_msg *msg = &m->msg;
	rte_mp_t action = NULL;

	RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);

	if (m->type == MP_REP || m->type == MP_IGN) {
		struct pending_request *req = NULL;

		pthread_mutex_lock(&pending_requests.lock);
		pending_req = find_pending_request(s->sun_path, msg->name);
		if (pending_req) {
			memcpy(pending_req->reply, msg, sizeof(*msg));
			/* -1 indicates that we've been asked to ignore */
			pending_req->reply_received =
				m->type == MP_REP ? 1 : -1;

			if (pending_req->type == REQUEST_TYPE_SYNC)
				pthread_cond_signal(&pending_req->sync.cond);
			else if (pending_req->type == REQUEST_TYPE_ASYNC)
				req = async_reply_handle_thread_unsafe(
						pending_req);
		} else
			RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
		pthread_mutex_unlock(&pending_requests.lock);

		if (req != NULL)
			trigger_async_action(req);
		return;
	}

	pthread_mutex_lock(&mp_mutex_action);
	entry = find_action_entry_by_name(msg->name);
	if (entry != NULL)
		action = entry->action;
	pthread_mutex_unlock(&mp_mutex_action);

	if (!action) {
		if (m->type == MP_REQ && !internal_config.init_complete) {
			/* if this is a request, and init is not yet complete,
			 * and callback wasn't registered, we should tell the
			 * requester to ignore our existence because we're not
			 * yet ready to process this request.
			 */
			struct rte_mp_msg dummy;

			memset(&dummy, 0, sizeof(dummy));
			strlcpy(dummy.name, msg->name, sizeof(dummy.name));
			mp_send(&dummy, s->sun_path, MP_IGN);
		} else {
			RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
				msg->name);
		}
	} else if (action(msg, s->sun_path) < 0) {
		RTE_LOG(ERR, EAL, "Fail to handle message: %s\n", msg->name);
	}
}

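/*
 * Dispatch summary: replies and ignore-notices are matched against the
 * pending request queue and wake up (or complete) the request they
 * answer, while plain messages and requests are dispatched to the
 * callback registered under the message name, if any.
 */
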
static void *
mp_handle(void *arg __rte_unused)
{
	struct mp_msg_internal msg;
	struct sockaddr_un sa;

	while (1) {
		if (read_msg(&msg, &sa) == 0)
			process_msg(&msg, &sa);
	}

	return NULL;
}

static int
timespec_cmp(const struct timespec *a, const struct timespec *b)
{
	if (a->tv_sec < b->tv_sec)
		return -1;
	if (a->tv_sec > b->tv_sec)
		return 1;
	if (a->tv_nsec < b->tv_nsec)
		return -1;
	if (a->tv_nsec > b->tv_nsec)
		return 1;
	return 0;
}

enum async_action {
	ACTION_FREE, /**< free the action entry, but don't trigger callback */
	ACTION_TRIGGER /**< trigger callback, then free action entry */
};

static enum async_action
process_async_request(struct pending_request *sr, const struct timespec *now)
{
	struct async_request_param *param;
	struct rte_mp_reply *reply;
	bool timeout, last_msg;

	param = sr->async.param;
	reply = &param->user_reply;

	/* did we timeout? */
	timeout = timespec_cmp(&param->end, now) <= 0;

	/* if we received a response, adjust relevant data and copy message. */
	if (sr->reply_received == 1 && sr->reply) {
		struct rte_mp_msg *msg, *user_msgs, *tmp;

		msg = sr->reply;
		user_msgs = reply->msgs;

		tmp = realloc(user_msgs, sizeof(*msg) *
				(reply->nb_received + 1));
		if (!tmp) {
			RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
				sr->dst, sr->request->name);
			/* this entry is going to be removed and its message
			 * dropped, but we don't want to leak memory, so
			 * continue.
			 */
		} else {
			user_msgs = tmp;
			reply->msgs = user_msgs;
			memcpy(&user_msgs[reply->nb_received],
					msg, sizeof(*msg));
			reply->nb_received++;
		}

		/* mark this request as processed */
		param->n_responses_processed++;
	} else if (sr->reply_received == -1) {
		/* we were asked to ignore this process */
		reply->nb_sent--;
	} else if (timeout) {
		/* count it as processed response, but don't increment
		 * nb_received.
		 */
		param->n_responses_processed++;
	}

	free(sr->reply);

	last_msg = param->n_responses_processed == reply->nb_sent;

	return last_msg ? ACTION_TRIGGER : ACTION_FREE;
}

static void
trigger_async_action(struct pending_request *sr)
{
	struct async_request_param *param;
	struct rte_mp_reply *reply;

	param = sr->async.param;
	reply = &param->user_reply;

	param->clb(sr->request, reply);

	/* clean up */
	free(sr->async.param->user_reply.msgs);
	free(sr->async.param);
	free(sr->request);
	free(sr);
}

static struct pending_request *
async_reply_handle_thread_unsafe(void *arg)
{
	struct pending_request *req = (struct pending_request *)arg;
	enum async_action action;
	struct timespec ts_now;
	struct timeval now;

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Cannot get current time\n");
		goto no_trigger;
	}
	ts_now.tv_nsec = now.tv_usec * 1000;
	ts_now.tv_sec = now.tv_sec;

	action = process_async_request(req, &ts_now);

	TAILQ_REMOVE(&pending_requests.requests, req, next);

	if (rte_eal_alarm_cancel(async_reply_handle, req) < 0) {
		/* if we failed to cancel the alarm because it's already in
		 * progress, don't proceed because otherwise we will end up
		 * handling the same message twice.
		 */
		if (rte_errno == EINPROGRESS) {
			RTE_LOG(DEBUG, EAL, "Request handling is already in progress\n");
			goto no_trigger;
		}
		RTE_LOG(ERR, EAL, "Failed to cancel alarm\n");
	}

	if (action == ACTION_TRIGGER)
		return req;
no_trigger:
	free(req);
	return NULL;
}

static void
async_reply_handle(void *arg)
{
	struct pending_request *req;

	pthread_mutex_lock(&pending_requests.lock);
	req = async_reply_handle_thread_unsafe(arg);
	pthread_mutex_unlock(&pending_requests.lock);

	if (req != NULL)
		trigger_async_action(req);
}

static int
open_socket_fd(void)
{
	char peer_name[PATH_MAX] = {0};
	struct sockaddr_un un;

	if (rte_eal_process_type() == RTE_PROC_SECONDARY)
		snprintf(peer_name, sizeof(peer_name),
				"%d_%"PRIx64, getpid(), rte_rdtsc());

	mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (mp_fd < 0) {
		RTE_LOG(ERR, EAL, "failed to create unix socket\n");
		return -1;
	}

	memset(&un, 0, sizeof(un));
	un.sun_family = AF_UNIX;

	create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));

	unlink(un.sun_path); /* May still exist since last run */

	if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
		RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
			un.sun_path, strerror(errno));
		close(mp_fd);
		return -1;
	}

	RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
	return mp_fd;
}

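/*
 * Note on naming: the primary binds the bare EAL mp socket path, while
 * each secondary appends "<pid>_<rdtsc>", which is why the "<prefix>_*"
 * pattern stored in mp_filter matches secondary sockets only.
 */
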
static int
unlink_sockets(const char *filter)
{
	int dir_fd;
	DIR *mp_dir;
	struct dirent *ent;

	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
		return -1;
	}
	dir_fd = dirfd(mp_dir);

	while ((ent = readdir(mp_dir))) {
		if (fnmatch(filter, ent->d_name, 0) == 0)
			unlinkat(dir_fd, ent->d_name, 0);
	}

	closedir(mp_dir);
	return 0;
}

int
rte_mp_channel_init(void)
{
	char path[PATH_MAX];
	int dir_fd;
	pthread_t mp_handle_tid;

	/* in no shared files mode, we do not have secondary processes support,
	 * so no need to initialize IPC.
	 */
	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC will be disabled\n");
		return 0;
	}

	/* create filter path */
	create_socket_path("*", path, sizeof(path));
	strlcpy(mp_filter, basename(path), sizeof(mp_filter));

	/* path may have been modified, so recreate it */
	create_socket_path("*", path, sizeof(path));
	strlcpy(mp_dir_path, dirname(path), sizeof(mp_dir_path));

	/* lock the directory */
	dir_fd = open(mp_dir_path, O_RDONLY);
	if (dir_fd < 0) {
		RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
			mp_dir_path, strerror(errno));
		return -1;
	}

	if (flock(dir_fd, LOCK_EX)) {
		RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
			mp_dir_path, strerror(errno));
		close(dir_fd);
		return -1;
	}

	if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
			unlink_sockets(mp_filter)) {
		RTE_LOG(ERR, EAL, "failed to unlink mp sockets\n");
		close(dir_fd);
		return -1;
	}

	if (open_socket_fd() < 0) {
		close(dir_fd);
		return -1;
	}

	if (rte_ctrl_thread_create(&mp_handle_tid, "rte_mp_handle",
			NULL, mp_handle, NULL) < 0) {
		RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
			strerror(errno));
		close(mp_fd);
		close(dir_fd);
		mp_fd = -1;
		return -1;
	}

	/* unlock the directory */
	flock(dir_fd, LOCK_UN);
	close(dir_fd);

	return 0;
}

/**
 * Return -1 if the message failed to send, caused by the local side.
 * Return 0 if the message failed to send, caused by the remote side.
 * Return 1 if the message was sent successfully.
 */
static int
send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
{
	int snd;
	struct iovec iov;
	struct msghdr msgh;
	struct cmsghdr *cmsg;
	struct sockaddr_un dst;
	struct mp_msg_internal m;
	int fd_size = msg->num_fds * sizeof(int);
	char control[CMSG_SPACE(fd_size)];

	m.type = type;
	memcpy(&m.msg, msg, sizeof(*msg));

	memset(&dst, 0, sizeof(dst));
	dst.sun_family = AF_UNIX;
	strlcpy(dst.sun_path, dst_path, sizeof(dst.sun_path));

	memset(&msgh, 0, sizeof(msgh));
	memset(control, 0, sizeof(control));

	iov.iov_base = &m;
	iov.iov_len = sizeof(m) - sizeof(msg->fds);

	msgh.msg_name = &dst;
	msgh.msg_namelen = sizeof(dst);
	msgh.msg_iov = &iov;
	msgh.msg_iovlen = 1;
	msgh.msg_control = control;
	msgh.msg_controllen = sizeof(control);

	cmsg = CMSG_FIRSTHDR(&msgh);
	cmsg->cmsg_len = CMSG_LEN(fd_size);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);

	do {
		snd = sendmsg(mp_fd, &msgh, 0);
	} while (snd < 0 && errno == EINTR);

	if (snd < 0) {
		rte_errno = errno;
		/* Check if the failure was caused by the peer process exiting */
		if (errno == ECONNREFUSED &&
				rte_eal_process_type() == RTE_PROC_PRIMARY) {
			unlink(dst_path);
			return 0;
		}
		if (errno == ENOBUFS) {
			RTE_LOG(ERR, EAL, "Peer cannot receive message %s\n",
				dst_path);
			return 0;
		}
		RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
			dst_path, strerror(errno));
		return -1;
	}

	return 1;
}

static int
mp_send(struct rte_mp_msg *msg, const char *peer, int type)
{
	int dir_fd, ret = 0;
	DIR *mp_dir;
	struct dirent *ent;

	if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
		peer = eal_mp_socket_path();

	if (peer) {
		if (send_msg(peer, msg, type) < 0)
			return -1;
		else
			return 0;
	}

	/* broadcast to all secondary processes */
	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
				mp_dir_path);
		rte_errno = errno;
		return -1;
	}

	dir_fd = dirfd(mp_dir);
	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
			mp_dir_path);
		rte_errno = errno;
		closedir(mp_dir);
		return -1;
	}

	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
			 ent->d_name);
		if (send_msg(path, msg, type) < 0)
			ret = -1;
	}
	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd automatically closed on closedir */
	closedir(mp_dir);
	return ret;
}

static bool
check_input(const struct rte_mp_msg *msg)
{
	if (msg == NULL) {
		RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
		rte_errno = EINVAL;
		return false;
	}

	if (validate_action_name(msg->name))
		return false;

	if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
		RTE_LOG(ERR, EAL, "Message data is too long\n");
		rte_errno = E2BIG;
		return false;
	}

	if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
		RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
			RTE_MP_MAX_FD_NUM);
		rte_errno = E2BIG;
		return false;
	}

	return true;
}

int __rte_experimental
rte_mp_sendmsg(struct rte_mp_msg *msg)
{
	if (!check_input(msg))
		return -1;

	RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
	return mp_send(msg, NULL, MP_MSG);
}

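/*
 * Usage sketch for rte_mp_sendmsg(); the action name "example_action" is
 * hypothetical and must match a name registered via
 * rte_mp_action_register() in the receiving process(es):
 *
 *	struct rte_mp_msg msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	strlcpy(msg.name, "example_action", sizeof(msg.name));
 *	if (rte_mp_sendmsg(&msg) < 0)
 *		RTE_LOG(ERR, USER1, "sendmsg failed\n");
 */
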
static int
mp_request_async(const char *dst, struct rte_mp_msg *req,
		struct async_request_param *param, const struct timespec *ts)
{
	struct rte_mp_msg *reply_msg;
	struct pending_request *pending_req, *exist;
	int ret;

	pending_req = calloc(1, sizeof(*pending_req));
	reply_msg = calloc(1, sizeof(*reply_msg));
	if (pending_req == NULL || reply_msg == NULL) {
		RTE_LOG(ERR, EAL, "Could not allocate space for async request\n");
		rte_errno = ENOMEM;
		ret = -1;
		goto fail;
	}

	pending_req->type = REQUEST_TYPE_ASYNC;
	strlcpy(pending_req->dst, dst, sizeof(pending_req->dst));
	pending_req->request = req;
	pending_req->reply = reply_msg;
	pending_req->async.param = param;

	/* queue already locked by caller */

	exist = find_pending_request(dst, req->name);
	if (exist) {
		RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
		rte_errno = EEXIST;
		ret = -1;
		goto fail;
	}

	ret = send_msg(dst, req, MP_REQ);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
			dst, req->name);
		ret = -1;
		goto fail;
	} else if (ret == 0) {
		ret = 0;
		goto fail;
	}
	TAILQ_INSERT_TAIL(&pending_requests.requests, pending_req, next);

	param->user_reply.nb_sent++;

	if (rte_eal_alarm_set(ts->tv_sec * 1000000 + ts->tv_nsec / 1000,
			      async_reply_handle, pending_req) < 0) {
		RTE_LOG(ERR, EAL, "Fail to set alarm for request %s:%s\n",
			dst, req->name);
		rte_panic("Failed to set alarm; cannot safely free request memory\n");
	}

	return 0;
fail:
	free(pending_req);
	free(reply_msg);
	return ret;
}

static int
mp_request_sync(const char *dst, struct rte_mp_msg *req,
	       struct rte_mp_reply *reply, const struct timespec *ts)
{
	int ret;
	struct rte_mp_msg msg, *tmp;
	struct pending_request pending_req, *exist;

	pending_req.type = REQUEST_TYPE_SYNC;
	pending_req.reply_received = 0;
	strlcpy(pending_req.dst, dst, sizeof(pending_req.dst));
	pending_req.request = req;
	pending_req.reply = &msg;
	pthread_cond_init(&pending_req.sync.cond, NULL);

	exist = find_pending_request(dst, req->name);
	if (exist) {
		RTE_LOG(ERR, EAL, "A pending request %s:%s\n", dst, req->name);
		rte_errno = EEXIST;
		return -1;
	}

	ret = send_msg(dst, req, MP_REQ);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Fail to send request %s:%s\n",
			dst, req->name);
		return -1;
	} else if (ret == 0)
		return 0;

	TAILQ_INSERT_TAIL(&pending_requests.requests, &pending_req, next);

	reply->nb_sent++;

	do {
		ret = pthread_cond_timedwait(&pending_req.sync.cond,
				&pending_requests.lock, ts);
	} while (ret != 0 && ret != ETIMEDOUT);

	TAILQ_REMOVE(&pending_requests.requests, &pending_req, next);

	if (pending_req.reply_received == 0) {
		RTE_LOG(ERR, EAL, "Fail to recv reply for request %s:%s\n",
			dst, req->name);
		rte_errno = ETIMEDOUT;
		return -1;
	}
	if (pending_req.reply_received == -1) {
		RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
		/* not receiving this message is not an error, so decrement
		 * number of sent messages
		 */
		reply->nb_sent--;
		return 0;
	}

	tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
	if (!tmp) {
		RTE_LOG(ERR, EAL, "Fail to alloc reply for request %s:%s\n",
			dst, req->name);
		rte_errno = ENOMEM;
		return -1;
	}
	memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
	reply->msgs = tmp;
	reply->nb_received++;
	return 0;
}

int __rte_experimental
rte_mp_request_sync(struct rte_mp_msg *req, struct rte_mp_reply *reply,
		const struct timespec *ts)
{
	int dir_fd, ret = 0;
	DIR *mp_dir;
	struct dirent *ent;
	struct timeval now;
	struct timespec end;

	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

	if (check_input(req) == false)
		return -1;

	reply->nb_sent = 0;
	reply->nb_received = 0;
	reply->msgs = NULL;

	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		return 0;
	}

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Failed to get current time\n");
		rte_errno = errno;
		return -1;
	}

	end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
	end.tv_sec = now.tv_sec + ts->tv_sec +
			(now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;

	/* for secondary process, send request to the primary process only */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		pthread_mutex_lock(&pending_requests.lock);
		ret = mp_request_sync(eal_mp_socket_path(), req, reply, &end);
		pthread_mutex_unlock(&pending_requests.lock);
		return ret;
	}

	/* for primary process, broadcast request and collect replies one by one */
	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
		rte_errno = errno;
		return -1;
	}

	dir_fd = dirfd(mp_dir);
	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
			mp_dir_path);
		closedir(mp_dir);
		rte_errno = errno;
		return -1;
	}

	pthread_mutex_lock(&pending_requests.lock);
	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
			 ent->d_name);

		/* unlocks the mutex while waiting for response,
		 * locks on receive
		 */
		if (mp_request_sync(path, req, reply, &end))
			ret = -1;
	}
	pthread_mutex_unlock(&pending_requests.lock);
	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd automatically closed on closedir */
	closedir(mp_dir);
	return ret;
}

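/*
 * Usage sketch for a synchronous request; names are hypothetical. On
 * success the caller owns reply.msgs and must free() it:
 *
 *	struct rte_mp_msg req;
 *	struct rte_mp_reply reply;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int i;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&reply, 0, sizeof(reply));
 *	strlcpy(req.name, "example_action", sizeof(req.name));
 *	if (rte_mp_request_sync(&req, &reply, &ts) == 0) {
 *		for (i = 0; i < reply.nb_received; i++)
 *			handle_one_reply(&reply.msgs[i]);
 *		free(reply.msgs);
 *	}
 */
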
int __rte_experimental
rte_mp_request_async(struct rte_mp_msg *req, const struct timespec *ts,
		rte_mp_async_reply_t clb)
{
	struct rte_mp_msg *copy;
	struct pending_request *dummy;
	struct async_request_param *param;
	struct rte_mp_reply *reply;
	int dir_fd, ret = 0;
	DIR *mp_dir;
	struct dirent *ent;
	struct timeval now;
	struct timespec *end;
	bool dummy_used = false;

	RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);

	if (check_input(req) == false)
		return -1;

	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		return 0;
	}

	if (gettimeofday(&now, NULL) < 0) {
		RTE_LOG(ERR, EAL, "Failed to get current time\n");
		rte_errno = errno;
		return -1;
	}
	copy = calloc(1, sizeof(*copy));
	dummy = calloc(1, sizeof(*dummy));
	param = calloc(1, sizeof(*param));
	if (copy == NULL || dummy == NULL || param == NULL) {
		RTE_LOG(ERR, EAL, "Failed to allocate memory for async reply\n");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* copy message */
	memcpy(copy, req, sizeof(*copy));

	param->n_responses_processed = 0;
	param->clb = clb;
	end = &param->end;
	reply = &param->user_reply;

	end->tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
	end->tv_sec = now.tv_sec + ts->tv_sec +
			(now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
	reply->nb_sent = 0;
	reply->nb_received = 0;
	reply->msgs = NULL;

	/* we have to lock the request queue here, as we will be adding a bunch
	 * of requests to the queue at once, and some of the replies may arrive
	 * before we add all of the requests to the queue.
	 */
	pthread_mutex_lock(&pending_requests.lock);

	/* we have to ensure that callback gets triggered even if we don't send
	 * anything, therefore earlier we have allocated a dummy request. fill
	 * it, and put it on the queue if we don't send any requests.
	 */
	dummy->type = REQUEST_TYPE_ASYNC;
	dummy->request = copy;
	dummy->reply = NULL;
	dummy->async.param = param;
	dummy->reply_received = 1; /* short-circuit the timeout */

	/* for secondary process, send request to the primary process only */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		ret = mp_request_async(eal_mp_socket_path(), copy, param, ts);

		/* if we didn't send anything, put dummy request on the queue */
		if (ret == 0 && reply->nb_sent == 0) {
			TAILQ_INSERT_TAIL(&pending_requests.requests, dummy,
					next);
			dummy_used = true;
		}

		pthread_mutex_unlock(&pending_requests.lock);

		/* if we couldn't send anything, clean up */
		if (ret != 0)
			goto fail;
		return 0;
	}

	/* for primary process, broadcast request */
	mp_dir = opendir(mp_dir_path);
	if (!mp_dir) {
		RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
		rte_errno = errno;
		goto unlock_fail;
	}
	dir_fd = dirfd(mp_dir);

	/* lock the directory to prevent processes spinning up while we send */
	if (flock(dir_fd, LOCK_SH)) {
		RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
			mp_dir_path);
		rte_errno = errno;
		goto closedir_fail;
	}

	while ((ent = readdir(mp_dir))) {
		char path[PATH_MAX];

		if (fnmatch(mp_filter, ent->d_name, 0) != 0)
			continue;

		snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
			 ent->d_name);

		if (mp_request_async(path, copy, param, ts))
			ret = -1;
	}
	/* if we didn't send anything, put dummy request on the queue */
	if (ret == 0 && reply->nb_sent == 0) {
		TAILQ_INSERT_HEAD(&pending_requests.requests, dummy, next);
		dummy_used = true;
	}

	/* finally, unlock the queue */
	pthread_mutex_unlock(&pending_requests.lock);

	/* unlock the directory */
	flock(dir_fd, LOCK_UN);

	/* dir_fd automatically closed on closedir */
	closedir(mp_dir);

	/* if dummy was unused, free it */
	if (!dummy_used)
		free(dummy);

	return ret;
closedir_fail:
	closedir(mp_dir);
unlock_fail:
	pthread_mutex_unlock(&pending_requests.lock);
fail:
	free(dummy);
	free(param);
	free(copy);
	return -1;
}

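/*
 * Usage sketch for an asynchronous request; names are hypothetical. The
 * callback runs later, from the IPC or alarm thread, once every peer has
 * answered or the timeout has expired:
 *
 *	static int
 *	example_reply_cb(const struct rte_mp_msg *request,
 *			const struct rte_mp_reply *reply)
 *	{
 *		RTE_LOG(INFO, USER1, "%s: got %d of %d replies\n",
 *			request->name, reply->nb_received, reply->nb_sent);
 *		return 0;
 *	}
 *
 *	...
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	if (rte_mp_request_async(&req, &ts, example_reply_cb) < 0)
 *		RTE_LOG(ERR, USER1, "async request failed\n");
 */
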
int __rte_experimental
rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
{
	RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);

	if (check_input(msg) == false)
		return -1;

	if (peer == NULL) {
		RTE_LOG(ERR, EAL, "peer is not specified\n");
		rte_errno = EINVAL;
		return -1;
	}

	if (internal_config.no_shconf) {
		RTE_LOG(DEBUG, EAL, "No shared files mode enabled, IPC is disabled\n");
		return 0;
	}

	return mp_send(msg, peer, MP_REP);
}
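
/*
 * Usage sketch for replying from inside an action handler; names are
 * hypothetical. The peer argument the handler receives is the sender's
 * socket path and is forwarded verbatim, and the reply must carry the
 * same name as the request so it can be matched to it:
 *
 *	static int
 *	example_handler(const struct rte_mp_msg *msg, const void *peer)
 *	{
 *		struct rte_mp_msg resp;
 *
 *		memset(&resp, 0, sizeof(resp));
 *		strlcpy(resp.name, msg->name, sizeof(resp.name));
 *		return rte_mp_reply(&resp, peer);
 *	}
 */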