1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2018 Intel Corporation
18 #include <sys/types.h>
19 #include <sys/socket.h>
23 #include <rte_common.h>
24 #include <rte_cycles.h>
26 #include <rte_errno.h>
27 #include <rte_lcore.h>
30 #include "eal_private.h"
31 #include "eal_filesystem.h"
32 #include "eal_internal_cfg.h"
34 static int mp_fd = -1;
35 static char mp_filter[PATH_MAX]; /* Filter for secondary process sockets */
36 static char mp_dir_path[PATH_MAX]; /* The directory path for all mp sockets */
37 static pthread_mutex_t mp_mutex_action = PTHREAD_MUTEX_INITIALIZER;
40 TAILQ_ENTRY(action_entry) next;
41 char action_name[RTE_MP_MAX_NAME_LEN];
45 /** Doubly-linked list of actions. */
46 TAILQ_HEAD(action_entry_list, action_entry);
48 static struct action_entry_list action_entry_list =
49 TAILQ_HEAD_INITIALIZER(action_entry_list);
52 MP_MSG, /* Share a message with peers; will not block */
53 MP_REQ, /* Request for information; will block for a reply */
54 MP_REP, /* Response to previously-received request */
55 MP_IGN, /* Response telling requester to ignore this response */
58 struct mp_msg_internal {
60 struct rte_mp_msg msg;
64 TAILQ_ENTRY(sync_request) next;
67 struct rte_mp_msg *request;
68 struct rte_mp_msg *reply;
72 TAILQ_HEAD(sync_request_list, sync_request);
75 struct sync_request_list requests;
78 .requests = TAILQ_HEAD_INITIALIZER(sync_requests.requests),
79 .lock = PTHREAD_MUTEX_INITIALIZER
82 /* forward declarations */
84 mp_send(struct rte_mp_msg *msg, const char *peer, int type);
87 static struct sync_request *
88 find_sync_request(const char *dst, const char *act_name)
90 struct sync_request *r;
92 TAILQ_FOREACH(r, &sync_requests.requests, next) {
93 if (!strcmp(r->dst, dst) &&
94 !strcmp(r->request->name, act_name))
102 create_socket_path(const char *name, char *buf, int len)
104 const char *prefix = eal_mp_socket_path();
106 if (strlen(name) > 0)
107 snprintf(buf, len, "%s_%s", prefix, name);
109 snprintf(buf, len, "%s", prefix);
113 rte_eal_primary_proc_alive(const char *config_file_path)
117 if (config_file_path)
118 config_fd = open(config_file_path, O_RDONLY);
122 path = eal_runtime_config_path();
123 config_fd = open(path, O_RDONLY);
128 int ret = lockf(config_fd, F_TEST, 0);
134 static struct action_entry *
135 find_action_entry_by_name(const char *name)
137 struct action_entry *entry;
139 TAILQ_FOREACH(entry, &action_entry_list, next) {
140 if (strncmp(entry->action_name, name, RTE_MP_MAX_NAME_LEN) == 0)
148 validate_action_name(const char *name)
151 RTE_LOG(ERR, EAL, "Action name cannot be NULL\n");
155 if (strnlen(name, RTE_MP_MAX_NAME_LEN) == 0) {
156 RTE_LOG(ERR, EAL, "Length of action name is zero\n");
160 if (strnlen(name, RTE_MP_MAX_NAME_LEN) == RTE_MP_MAX_NAME_LEN) {
167 int __rte_experimental
168 rte_mp_action_register(const char *name, rte_mp_t action)
170 struct action_entry *entry;
172 if (validate_action_name(name))
175 entry = malloc(sizeof(struct action_entry));
180 strcpy(entry->action_name, name);
181 entry->action = action;
183 pthread_mutex_lock(&mp_mutex_action);
184 if (find_action_entry_by_name(name) != NULL) {
185 pthread_mutex_unlock(&mp_mutex_action);
190 TAILQ_INSERT_TAIL(&action_entry_list, entry, next);
191 pthread_mutex_unlock(&mp_mutex_action);
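/*
 * Illustrative usage sketch (not part of this file), assuming the rte_mp_t
 * callback signature of (const struct rte_mp_msg *, const void *): a
 * component registers a named callback so that the mp_handle() thread
 * dispatches matching messages to it. The name "my_component" and
 * handle_my_msg() are hypothetical.
 *
 *	static int
 *	handle_my_msg(const struct rte_mp_msg *msg, const void *peer)
 *	{
 *		RTE_SET_USED(peer);
 *		RTE_LOG(DEBUG, EAL, "got %s\n", msg->name);
 *		return 0;
 *	}
 *
 *	if (rte_mp_action_register("my_component", handle_my_msg) < 0)
 *		RTE_LOG(ERR, EAL, "cannot register my_component action\n");
 */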
195 void __rte_experimental
196 rte_mp_action_unregister(const char *name)
198 struct action_entry *entry;
200 if (validate_action_name(name))
203 pthread_mutex_lock(&mp_mutex_action);
204 entry = find_action_entry_by_name(name);
206 pthread_mutex_unlock(&mp_mutex_action);
209 TAILQ_REMOVE(&action_entry_list, entry, next);
210 pthread_mutex_unlock(&mp_mutex_action);
215 read_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
220 char control[CMSG_SPACE(sizeof(m->msg.fds))];
221 struct cmsghdr *cmsg;
222 int buflen = sizeof(*m) - sizeof(m->msg.fds);
224 memset(&msgh, 0, sizeof(msgh));
226 iov.iov_len = buflen;
229 msgh.msg_namelen = sizeof(*s);
232 msgh.msg_control = control;
233 msgh.msg_controllen = sizeof(control);
235 msglen = recvmsg(mp_fd, &msgh, 0);
237 RTE_LOG(ERR, EAL, "recvmsg failed, %s\n", strerror(errno));
241 if (msglen != buflen || (msgh.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
242 RTE_LOG(ERR, EAL, "truncated msg\n");
246 /* read auxiliary FDs if any */
247 for (cmsg = CMSG_FIRSTHDR(&msgh); cmsg != NULL;
248 cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
249 if ((cmsg->cmsg_level == SOL_SOCKET) &&
250 (cmsg->cmsg_type == SCM_RIGHTS)) {
251 memcpy(m->msg.fds, CMSG_DATA(cmsg), sizeof(m->msg.fds));
260 process_msg(struct mp_msg_internal *m, struct sockaddr_un *s)
262 struct sync_request *sync_req;
263 struct action_entry *entry;
264 struct rte_mp_msg *msg = &m->msg;
265 rte_mp_t action = NULL;
267 RTE_LOG(DEBUG, EAL, "msg: %s\n", msg->name);
269 if (m->type == MP_REP || m->type == MP_IGN) {
270 pthread_mutex_lock(&sync_requests.lock);
271 sync_req = find_sync_request(s->sun_path, msg->name);
273 memcpy(sync_req->reply, msg, sizeof(*msg));
274 /* -1 indicates that we've been asked to ignore */
275 sync_req->reply_received = m->type == MP_REP ? 1 : -1;
276 pthread_cond_signal(&sync_req->cond);
278 RTE_LOG(ERR, EAL, "Drop mp reply: %s\n", msg->name);
279 pthread_mutex_unlock(&sync_requests.lock);
283 pthread_mutex_lock(&mp_mutex_action);
284 entry = find_action_entry_by_name(msg->name);
286 action = entry->action;
287 pthread_mutex_unlock(&mp_mutex_action);
290 if (m->type == MP_REQ && !internal_config.init_complete) {
291 /* if this is a request, init is not yet complete,
292 * and no callback was registered, we should tell the
293 * requester to ignore our existence because we are not
294 * yet ready to process this request.
296 struct rte_mp_msg dummy;
297 memset(&dummy, 0, sizeof(dummy));
298 mp_send(&dummy, s->sun_path, MP_IGN);
300 RTE_LOG(ERR, EAL, "Cannot find action: %s\n",
303 } else if (action(msg, s->sun_path) < 0) {
304 RTE_LOG(ERR, EAL, "Failed to handle message: %s\n", msg->name);
309 mp_handle(void *arg __rte_unused)
311 struct mp_msg_internal msg;
312 struct sockaddr_un sa;
315 if (read_msg(&msg, &sa) == 0)
316 process_msg(&msg, &sa);
325 char peer_name[PATH_MAX] = {0};
326 struct sockaddr_un un;
328 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
329 snprintf(peer_name, sizeof(peer_name),
330 "%d_%"PRIx64, getpid(), rte_rdtsc());
332 mp_fd = socket(AF_UNIX, SOCK_DGRAM, 0);
334 RTE_LOG(ERR, EAL, "failed to create unix socket\n");
338 memset(&un, 0, sizeof(un));
339 un.sun_family = AF_UNIX;
341 create_socket_path(peer_name, un.sun_path, sizeof(un.sun_path));
343 unlink(un.sun_path); /* may still exist from a previous run */
345 if (bind(mp_fd, (struct sockaddr *)&un, sizeof(un)) < 0) {
346 RTE_LOG(ERR, EAL, "failed to bind %s: %s\n",
347 un.sun_path, strerror(errno));
352 RTE_LOG(INFO, EAL, "Multi-process socket %s\n", un.sun_path);
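/*
 * Note: the primary process binds the bare path returned by
 * eal_mp_socket_path() (peer_name stays empty above), while each secondary
 * appends "<pid>_<rdtsc>" to it, so every secondary gets a unique datagram
 * socket in mp_dir_path that matches the mp_filter glob used when
 * broadcasting.
 */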
357 unlink_sockets(const char *filter)
363 mp_dir = opendir(mp_dir_path);
365 RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
368 dir_fd = dirfd(mp_dir);
370 while ((ent = readdir(mp_dir))) {
371 if (fnmatch(filter, ent->d_name, 0) == 0)
372 unlinkat(dir_fd, ent->d_name, 0);
380 rte_mp_channel_init(void)
382 char thread_name[RTE_MAX_THREAD_NAME_LEN];
387 /* create filter path */
388 create_socket_path("*", path, sizeof(path));
389 snprintf(mp_filter, sizeof(mp_filter), "%s", basename(path));
391 /* path may have been modified, so recreate it */
392 create_socket_path("*", path, sizeof(path));
393 snprintf(mp_dir_path, sizeof(mp_dir_path), "%s", dirname(path));
395 /* lock the directory */
396 dir_fd = open(mp_dir_path, O_RDONLY);
398 RTE_LOG(ERR, EAL, "failed to open %s: %s\n",
399 mp_dir_path, strerror(errno));
403 if (flock(dir_fd, LOCK_EX)) {
404 RTE_LOG(ERR, EAL, "failed to lock %s: %s\n",
405 mp_dir_path, strerror(errno));
410 if (rte_eal_process_type() == RTE_PROC_PRIMARY &&
411 unlink_sockets(mp_filter)) {
412 RTE_LOG(ERR, EAL, "failed to unlink mp sockets\n");
417 if (open_socket_fd() < 0) {
422 if (pthread_create(&tid, NULL, mp_handle, NULL) < 0) {
423 RTE_LOG(ERR, EAL, "failed to create mp thread: %s\n",
431 /* try best to set thread name */
432 snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "rte_mp_handle");
433 rte_thread_setname(tid, thread_name);
435 /* unlock the directory */
436 flock(dir_fd, LOCK_UN);
443 * Return -1 if sending the message failed due to an error on the local side.
444 * Return 0 if sending the message failed due to an error on the remote side.
445 * Return 1 if the message was sent successfully.
449 send_msg(const char *dst_path, struct rte_mp_msg *msg, int type)
454 struct cmsghdr *cmsg;
455 struct sockaddr_un dst;
456 struct mp_msg_internal m;
457 int fd_size = msg->num_fds * sizeof(int);
458 char control[CMSG_SPACE(fd_size)];
461 memcpy(&m.msg, msg, sizeof(*msg));
463 memset(&dst, 0, sizeof(dst));
464 dst.sun_family = AF_UNIX;
465 snprintf(dst.sun_path, sizeof(dst.sun_path), "%s", dst_path);
467 memset(&msgh, 0, sizeof(msgh));
468 memset(control, 0, sizeof(control));
471 iov.iov_len = sizeof(m) - sizeof(msg->fds);
473 msgh.msg_name = &dst;
474 msgh.msg_namelen = sizeof(dst);
477 msgh.msg_control = control;
478 msgh.msg_controllen = sizeof(control);
480 cmsg = CMSG_FIRSTHDR(&msgh);
481 cmsg->cmsg_len = CMSG_LEN(fd_size);
482 cmsg->cmsg_level = SOL_SOCKET;
483 cmsg->cmsg_type = SCM_RIGHTS;
484 memcpy(CMSG_DATA(cmsg), msg->fds, fd_size);
487 snd = sendmsg(mp_fd, &msgh, 0);
488 } while (snd < 0 && errno == EINTR);
492 /* Check if it is caused by the peer process exiting */
493 if (errno == ECONNREFUSED &&
494 rte_eal_process_type() == RTE_PROC_PRIMARY) {
498 if (errno == ENOBUFS) {
499 RTE_LOG(ERR, EAL, "Peer cannot receive message %s\n",
503 RTE_LOG(ERR, EAL, "failed to send to (%s) due to %s\n",
504 dst_path, strerror(errno));
512 mp_send(struct rte_mp_msg *msg, const char *peer, int type)
518 if (!peer && (rte_eal_process_type() == RTE_PROC_SECONDARY))
519 peer = eal_mp_socket_path();
522 if (send_msg(peer, msg, type) < 0)
528 /* broadcast to all secondary processes */
529 mp_dir = opendir(mp_dir_path);
531 RTE_LOG(ERR, EAL, "Unable to open directory %s\n",
537 dir_fd = dirfd(mp_dir);
538 /* lock the directory to prevent processes spinning up while we send */
539 if (flock(dir_fd, LOCK_EX)) {
540 RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
547 while ((ent = readdir(mp_dir))) {
550 if (fnmatch(mp_filter, ent->d_name, 0) != 0)
553 snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
555 if (send_msg(path, msg, type) < 0)
559 flock(dir_fd, LOCK_UN);
561 /* dir_fd automatically closed on closedir */
567 check_input(const struct rte_mp_msg *msg)
570 RTE_LOG(ERR, EAL, "Msg cannot be NULL\n");
575 if (validate_action_name(msg->name))
578 if (msg->len_param > RTE_MP_MAX_PARAM_LEN) {
579 RTE_LOG(ERR, EAL, "Message data is too long\n");
584 if (msg->num_fds > RTE_MP_MAX_FD_NUM) {
585 RTE_LOG(ERR, EAL, "Cannot send more than %d FDs\n",
594 int __rte_experimental
595 rte_mp_sendmsg(struct rte_mp_msg *msg)
597 if (!check_input(msg))
600 RTE_LOG(DEBUG, EAL, "sendmsg: %s\n", msg->name);
601 return mp_send(msg, NULL, MP_MSG);
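/*
 * Illustrative usage sketch (not part of this file): composing a
 * fire-and-forget message. The action name "my_component" and its payload
 * are hypothetical; len_param and num_fds must not exceed
 * RTE_MP_MAX_PARAM_LEN and RTE_MP_MAX_FD_NUM or check_input() rejects the
 * message.
 *
 *	struct rte_mp_msg msg;
 *
 *	memset(&msg, 0, sizeof(msg));
 *	snprintf(msg.name, sizeof(msg.name), "%s", "my_component");
 *	memcpy(msg.param, "hello", 6);
 *	msg.len_param = 6;
 *	msg.num_fds = 0;
 *	if (rte_mp_sendmsg(&msg) < 0)
 *		RTE_LOG(ERR, EAL, "cannot send my_component message\n");
 */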
605 mp_request_one(const char *dst, struct rte_mp_msg *req,
606 struct rte_mp_reply *reply, const struct timespec *ts)
609 struct rte_mp_msg msg, *tmp;
610 struct sync_request sync_req, *exist;
612 sync_req.reply_received = 0;
613 strcpy(sync_req.dst, dst);
614 sync_req.request = req;
615 sync_req.reply = &msg;
616 pthread_cond_init(&sync_req.cond, NULL);
618 pthread_mutex_lock(&sync_requests.lock);
619 exist = find_sync_request(dst, req->name);
621 TAILQ_INSERT_TAIL(&sync_requests.requests, &sync_req, next);
623 RTE_LOG(ERR, EAL, "A request %s:%s is already pending\n", dst, req->name);
625 pthread_mutex_unlock(&sync_requests.lock);
629 ret = send_msg(dst, req, MP_REQ);
631 RTE_LOG(ERR, EAL, "Failed to send request %s:%s\n",
640 ret = pthread_cond_timedwait(&sync_req.cond,
641 &sync_requests.lock, ts);
642 } while (ret != 0 && ret != ETIMEDOUT);
644 /* We got the lock now */
645 TAILQ_REMOVE(&sync_requests.requests, &sync_req, next);
646 pthread_mutex_unlock(&sync_requests.lock);
648 if (sync_req.reply_received == 0) {
649 RTE_LOG(ERR, EAL, "Failed to receive reply for request %s:%s\n",
651 rte_errno = ETIMEDOUT;
654 if (sync_req.reply_received == -1) {
655 RTE_LOG(DEBUG, EAL, "Asked to ignore response\n");
656 /* the peer asked us to ignore its response; this is not an error,
657 * so decrement the number of sent messages
663 tmp = realloc(reply->msgs, sizeof(msg) * (reply->nb_received + 1));
665 RTE_LOG(ERR, EAL, "Failed to allocate reply for request %s:%s\n",
670 memcpy(&tmp[reply->nb_received], &msg, sizeof(msg));
672 reply->nb_received++;
676 int __rte_experimental
677 rte_mp_request(struct rte_mp_msg *req, struct rte_mp_reply *reply,
678 const struct timespec *ts)
686 RTE_LOG(DEBUG, EAL, "request: %s\n", req->name);
688 if (check_input(req) == false)
690 if (gettimeofday(&now, NULL) < 0) {
691 RTE_LOG(ERR, EAL, "Failed to get current time\n");
696 end.tv_nsec = (now.tv_usec * 1000 + ts->tv_nsec) % 1000000000;
697 end.tv_sec = now.tv_sec + ts->tv_sec +
698 (now.tv_usec * 1000 + ts->tv_nsec) / 1000000000;
701 reply->nb_received = 0;
704 /* for secondary process, send request to the primary process only */
705 if (rte_eal_process_type() == RTE_PROC_SECONDARY)
706 return mp_request_one(eal_mp_socket_path(), req, reply, &end);
708 /* for primary process, broadcast request and collect replies one by one */
709 mp_dir = opendir(mp_dir_path);
711 RTE_LOG(ERR, EAL, "Unable to open directory %s\n", mp_dir_path);
716 dir_fd = dirfd(mp_dir);
717 /* lock the directory to prevent processes spinning up while we send */
718 if (flock(dir_fd, LOCK_EX)) {
719 RTE_LOG(ERR, EAL, "Unable to lock directory %s\n",
726 while ((ent = readdir(mp_dir))) {
729 if (fnmatch(mp_filter, ent->d_name, 0) != 0)
732 snprintf(path, sizeof(path), "%s/%s", mp_dir_path,
735 if (mp_request_one(path, req, reply, &end))
738 /* unlock the directory */
739 flock(dir_fd, LOCK_UN);
741 /* dir_fd automatically closed on closedir */
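/*
 * Illustrative usage sketch (not part of this file): issuing a synchronous
 * request with a timeout and walking the collected replies. The action name
 * "my_component" is hypothetical; reply.msgs is allocated with realloc() in
 * mp_request_one() and is owned by the caller afterwards.
 *
 *	struct rte_mp_msg req;
 *	struct rte_mp_reply reply;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *	int i;
 *
 *	memset(&req, 0, sizeof(req));
 *	memset(&reply, 0, sizeof(reply));
 *	snprintf(req.name, sizeof(req.name), "%s", "my_component");
 *	if (rte_mp_request(&req, &reply, &ts) == 0) {
 *		for (i = 0; i < reply.nb_received; i++)
 *			RTE_LOG(DEBUG, EAL, "got reply %d\n", i);
 *		free(reply.msgs);
 *	}
 */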
746 int __rte_experimental
747 rte_mp_reply(struct rte_mp_msg *msg, const char *peer)
750 RTE_LOG(DEBUG, EAL, "reply: %s\n", msg->name);
752 if (check_input(msg) == false)
756 RTE_LOG(ERR, EAL, "peer is not specified\n");
761 return mp_send(msg, peer, MP_REP);
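/*
 * Illustrative usage sketch (not part of this file): answering a request
 * from inside a registered callback. The reply must reuse the request's
 * name so that find_sync_request() can match it on the requester side;
 * handle_my_msg() is hypothetical.
 *
 *	static int
 *	handle_my_msg(const struct rte_mp_msg *msg, const void *peer)
 *	{
 *		struct rte_mp_msg rep;
 *
 *		memset(&rep, 0, sizeof(rep));
 *		snprintf(rep.name, sizeof(rep.name), "%s", msg->name);
 *		return rte_mp_reply(&rep, (const char *)peer);
 *	}
 */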