af260f7a87792fbacc053df88d177da67efe76c5
[dpdk.git] / drivers / net / memif / rte_eth_memif.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2019 Cisco Systems, Inc.  All rights reserved.
3  */
4
5 #include <stdlib.h>
6 #include <fcntl.h>
7 #include <unistd.h>
8 #include <sys/types.h>
9 #include <sys/socket.h>
10 #include <sys/un.h>
11 #include <sys/ioctl.h>
12 #include <sys/mman.h>
13 #include <linux/if_ether.h>
14 #include <errno.h>
15 #include <sys/eventfd.h>
16
17 #include <rte_version.h>
18 #include <rte_mbuf.h>
19 #include <rte_ether.h>
20 #include <rte_ethdev_driver.h>
21 #include <rte_ethdev_vdev.h>
22 #include <rte_malloc.h>
23 #include <rte_kvargs.h>
24 #include <rte_bus_vdev.h>
25 #include <rte_string_fns.h>
26
27 #include "rte_eth_memif.h"
28 #include "memif_socket.h"
29
30 #define ETH_MEMIF_ID_ARG                "id"
31 #define ETH_MEMIF_ROLE_ARG              "role"
32 #define ETH_MEMIF_PKT_BUFFER_SIZE_ARG   "bsize"
33 #define ETH_MEMIF_RING_SIZE_ARG         "rsize"
34 #define ETH_MEMIF_SOCKET_ARG            "socket"
35 #define ETH_MEMIF_MAC_ARG               "mac"
36 #define ETH_MEMIF_ZC_ARG                "zero-copy"
37 #define ETH_MEMIF_SECRET_ARG            "secret"
38
/* Recognized devargs keys for the memif vdev; NULL-terminated sentinel list. */
static const char * const valid_arguments[] = {
	ETH_MEMIF_ID_ARG,
	ETH_MEMIF_ROLE_ARG,
	ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
	ETH_MEMIF_RING_SIZE_ARG,
	ETH_MEMIF_SOCKET_ARG,
	ETH_MEMIF_MAC_ARG,
	ETH_MEMIF_ZC_ARG,
	ETH_MEMIF_SECRET_ARG,
	NULL
};
50
51 #define MEMIF_MP_SEND_REGION            "memif_mp_send_region"
52
53 const char *
54 memif_version(void)
55 {
56         return ("memif-" RTE_STR(MEMIF_VERSION_MAJOR) "." RTE_STR(MEMIF_VERSION_MINOR));
57 }
58
/* Message header to synchronize regions between primary and secondary
 * processes over the rte_mp IPC channel.
 */
struct mp_region_msg {
	char port_name[RTE_DEV_NAME_MAX_LEN];	/* port the region belongs to */
	memif_region_index_t idx;		/* region index being requested/sent */
	memif_region_size_t size;		/* region size; 0 means no such region */
};
65
/* rte_mp handler run in the primary process: reply to a secondary's request
 * for one memory region with the region size and its file descriptor.
 * Returns 0 on success, -1 when the port lookup or the IPC reply fails.
 */
static int
memif_mp_send_region(const struct rte_mp_msg *msg, const void *peer)
{
	struct rte_eth_dev *dev;
	struct pmd_process_private *proc_private;
	const struct mp_region_msg *msg_param = (const struct mp_region_msg *)msg->param;
	struct rte_mp_msg reply;
	struct mp_region_msg *reply_param = (struct mp_region_msg *)reply.param;
	uint16_t port_id;
	int ret;

	/* Get requested port */
	ret = rte_eth_dev_get_port_by_name(msg_param->port_name, &port_id);
	if (ret) {
		MIF_LOG(ERR, "Failed to get port id for %s",
			msg_param->port_name);
		return -1;
	}
	dev = &rte_eth_devices[port_id];
	proc_private = dev->process_private;

	/* Reply must reuse the request's message name for IPC matching. */
	memset(&reply, 0, sizeof(reply));
	strlcpy(reply.name, msg->name, sizeof(reply.name));
	reply_param->idx = msg_param->idx;
	/* If the region does not exist, size stays 0 and no fd is attached;
	 * the secondary interprets that as "no region at this index".
	 */
	if (proc_private->regions[msg_param->idx] != NULL) {
		reply_param->size = proc_private->regions[msg_param->idx]->region_size;
		reply.fds[0] = proc_private->regions[msg_param->idx]->fd;
		reply.num_fds = 1;
	}
	reply.len_param = sizeof(*reply_param);
	if (rte_mp_reply(&reply, peer) < 0) {
		MIF_LOG(ERR, "Failed to reply to an add region request");
		return -1;
	}

	return 0;
}
103
104 /*
105  * Request regions
106  * Called by secondary process, when ports link status goes up.
107  */
108 static int
109 memif_mp_request_regions(struct rte_eth_dev *dev)
110 {
111         int ret, i;
112         struct timespec timeout = {.tv_sec = 5, .tv_nsec = 0};
113         struct rte_mp_msg msg, *reply;
114         struct rte_mp_reply replies;
115         struct mp_region_msg *msg_param = (struct mp_region_msg *)msg.param;
116         struct mp_region_msg *reply_param;
117         struct memif_region *r;
118         struct pmd_process_private *proc_private = dev->process_private;
119
120         MIF_LOG(DEBUG, "Requesting memory regions");
121
122         for (i = 0; i < ETH_MEMIF_MAX_REGION_NUM; i++) {
123                 /* Prepare the message */
124                 memset(&msg, 0, sizeof(msg));
125                 strlcpy(msg.name, MEMIF_MP_SEND_REGION, sizeof(msg.name));
126                 strlcpy(msg_param->port_name, dev->data->name,
127                         sizeof(msg_param->port_name));
128                 msg_param->idx = i;
129                 msg.len_param = sizeof(*msg_param);
130
131                 /* Send message */
132                 ret = rte_mp_request_sync(&msg, &replies, &timeout);
133                 if (ret < 0 || replies.nb_received != 1) {
134                         MIF_LOG(ERR, "Failed to send mp msg: %d",
135                                 rte_errno);
136                         return -1;
137                 }
138
139                 reply = &replies.msgs[0];
140                 reply_param = (struct mp_region_msg *)reply->param;
141
142                 if (reply_param->size > 0) {
143                         r = rte_zmalloc("region", sizeof(struct memif_region), 0);
144                         if (r == NULL) {
145                                 MIF_LOG(ERR, "Failed to alloc memif region.");
146                                 free(reply);
147                                 return -ENOMEM;
148                         }
149                         r->region_size = reply_param->size;
150                         if (reply->num_fds < 1) {
151                                 MIF_LOG(ERR, "Missing file descriptor.");
152                                 free(reply);
153                                 return -1;
154                         }
155                         r->fd = reply->fds[0];
156                         r->addr = NULL;
157
158                         proc_private->regions[reply_param->idx] = r;
159                         proc_private->regions_num++;
160                 }
161                 free(reply);
162         }
163
164         return memif_connect(dev);
165 }
166
167 static int
168 memif_dev_info(struct rte_eth_dev *dev __rte_unused, struct rte_eth_dev_info *dev_info)
169 {
170         dev_info->max_mac_addrs = 1;
171         dev_info->max_rx_pktlen = (uint32_t)ETH_FRAME_LEN;
172         dev_info->max_rx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
173         dev_info->max_tx_queues = ETH_MEMIF_MAX_NUM_Q_PAIRS;
174         dev_info->min_rx_bufsize = 0;
175
176         return 0;
177 }
178
179 static memif_ring_t *
180 memif_get_ring(struct pmd_internals *pmd, struct pmd_process_private *proc_private,
181                memif_ring_type_t type, uint16_t ring_num)
182 {
183         /* rings only in region 0 */
184         void *p = proc_private->regions[0]->addr;
185         int ring_size = sizeof(memif_ring_t) + sizeof(memif_desc_t) *
186             (1 << pmd->run.log2_ring_size);
187
188         p = (uint8_t *)p + (ring_num + type * pmd->run.num_s2m_rings) * ring_size;
189
190         return (memif_ring_t *)p;
191 }
192
193 static memif_region_offset_t
194 memif_get_ring_offset(struct rte_eth_dev *dev, struct memif_queue *mq,
195                       memif_ring_type_t type, uint16_t num)
196 {
197         struct pmd_internals *pmd = dev->data->dev_private;
198         struct pmd_process_private *proc_private = dev->process_private;
199
200         return ((uint8_t *)memif_get_ring(pmd, proc_private, type, num) -
201                 (uint8_t *)proc_private->regions[mq->region]->addr);
202 }
203
204 static memif_ring_t *
205 memif_get_ring_from_queue(struct pmd_process_private *proc_private,
206                           struct memif_queue *mq)
207 {
208         struct memif_region *r;
209
210         r = proc_private->regions[mq->region];
211         if (r == NULL)
212                 return NULL;
213
214         return (memif_ring_t *)((uint8_t *)r->addr + mq->ring_offset);
215 }
216
217 static void *
218 memif_get_buffer(struct pmd_process_private *proc_private, memif_desc_t *d)
219 {
220         return ((uint8_t *)proc_private->regions[d->region]->addr + d->offset);
221 }
222
223 static int
224 memif_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *cur_tail,
225                     struct rte_mbuf *tail)
226 {
227         /* Check for number-of-segments-overflow */
228         if (unlikely(head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS))
229                 return -EOVERFLOW;
230
231         /* Chain 'tail' onto the old tail */
232         cur_tail->next = tail;
233
234         /* accumulate number of segments and total length. */
235         head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
236
237         tail->pkt_len = tail->data_len;
238         head->pkt_len += tail->pkt_len;
239
240         return 0;
241 }
242
/* Burst receive: copy packets from the shared-memory ring into newly
 * allocated mbufs. On an S2M ring this side consumes slots the slave
 * produced; on an M2S ring it consumes the master's slots and then refills
 * the ring with empty buffers for the peer's next transmissions.
 * Returns the number of packets stored in 'bufs'.
 */
static uint16_t
eth_memif_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct memif_queue *mq = queue;
	struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
	struct pmd_process_private *proc_private =
		rte_eth_devices[mq->in_port].process_private;
	memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
	uint16_t cur_slot, last_slot, n_slots, ring_size, mask, s0;
	uint16_t n_rx_pkts = 0;
	/* usable payload bytes per mbuf segment */
	uint16_t mbuf_size = rte_pktmbuf_data_room_size(mq->mempool) -
		RTE_PKTMBUF_HEADROOM;
	uint16_t src_len, src_off, dst_len, dst_off, cp_len;
	memif_ring_type_t type = mq->type;
	memif_desc_t *d0;
	struct rte_mbuf *mbuf, *mbuf_head, *mbuf_tail;
	uint64_t b;
	ssize_t size __rte_unused;
	uint16_t head;
	int ret;
	struct rte_eth_link link;

	if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
		return 0;
	if (unlikely(ring == NULL)) {
		/* Secondary process will attempt to request regions. */
		rte_eth_link_get(mq->in_port, &link);
		return 0;
	}

	/* consume interrupt */
	if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0)
		size = read(mq->intr_handle.fd, &b, sizeof(b));

	ring_size = 1 << mq->log2_ring_size;
	mask = ring_size - 1;

	/* Slots between our last processed position and the peer's marker
	 * are ready to consume; indices are free-running uint16_t, so the
	 * subtraction below is wrap-safe.
	 */
	cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;
	last_slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
	if (cur_slot == last_slot)
		goto refill;
	n_slots = last_slot - cur_slot;

	while (n_slots && n_rx_pkts < nb_pkts) {
		mbuf_head = rte_pktmbuf_alloc(mq->mempool);
		if (unlikely(mbuf_head == NULL))
			goto no_free_bufs;
		mbuf = mbuf_head;
		mbuf->port = mq->in_port;

next_slot:
		s0 = cur_slot & mask;
		d0 = &ring->desc[s0];

		src_len = d0->length;
		dst_off = 0;
		src_off = 0;

		/* Copy one descriptor's payload, allocating and chaining a
		 * fresh mbuf segment whenever the current one fills up.
		 */
		do {
			dst_len = mbuf_size - dst_off;
			if (dst_len == 0) {
				dst_off = 0;
				dst_len = mbuf_size;

				/* store pointer to tail */
				mbuf_tail = mbuf;
				mbuf = rte_pktmbuf_alloc(mq->mempool);
				if (unlikely(mbuf == NULL))
					goto no_free_bufs;
				mbuf->port = mq->in_port;
				ret = memif_pktmbuf_chain(mbuf_head, mbuf_tail, mbuf);
				if (unlikely(ret < 0)) {
					MIF_LOG(ERR, "number-of-segments-overflow");
					rte_pktmbuf_free(mbuf);
					goto no_free_bufs;
				}
			}
			cp_len = RTE_MIN(dst_len, src_len);

			rte_pktmbuf_data_len(mbuf) += cp_len;
			rte_pktmbuf_pkt_len(mbuf) = rte_pktmbuf_data_len(mbuf);
			if (mbuf != mbuf_head)
				rte_pktmbuf_pkt_len(mbuf_head) += cp_len;

			memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, dst_off),
			       (uint8_t *)memif_get_buffer(proc_private, d0) +
			       src_off, cp_len);

			src_off += cp_len;
			dst_off += cp_len;
			src_len -= cp_len;
		} while (src_len);

		cur_slot++;
		n_slots--;

		/* A chained descriptor (multi-slot frame) continues in the
		 * next slot of the same packet.
		 */
		if (d0->flags & MEMIF_DESC_FLAG_NEXT)
			goto next_slot;

		mq->n_bytes += rte_pktmbuf_pkt_len(mbuf_head);
		*bufs++ = mbuf_head;
		n_rx_pkts++;
	}

no_free_bufs:
	if (type == MEMIF_RING_S2M) {
		/* Barrier: copies must complete before the producer sees the
		 * slots as free again.
		 */
		rte_mb();
		ring->tail = cur_slot;
		mq->last_head = cur_slot;
	} else {
		mq->last_tail = cur_slot;
	}

refill:
	/* As M2S consumer this side also owns the head: hand empty,
	 * full-size buffers back to the master.
	 */
	if (type == MEMIF_RING_M2S) {
		head = ring->head;
		n_slots = ring_size - head + mq->last_tail;

		while (n_slots--) {
			s0 = head++ & mask;
			d0 = &ring->desc[s0];
			d0->length = pmd->run.pkt_buffer_size;
		}
		rte_mb();
		ring->head = head;
	}

	mq->n_pkts += n_rx_pkts;
	return n_rx_pkts;
}
373
/* Burst transmit: copy mbuf chains into shared-memory ring buffers.
 * As S2M producer this side advances the head into full-size empty
 * buffers; as M2S producer it fills slots (and sizes) the consumer
 * previously handed back via the refill path. Transmitted mbufs are
 * freed; untransmitted ones remain owned by the caller.
 * Returns the number of packets placed on the ring.
 */
static uint16_t
eth_memif_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
	struct memif_queue *mq = queue;
	struct pmd_internals *pmd = rte_eth_devices[mq->in_port].data->dev_private;
	struct pmd_process_private *proc_private =
		rte_eth_devices[mq->in_port].process_private;
	memif_ring_t *ring = memif_get_ring_from_queue(proc_private, mq);
	uint16_t slot, saved_slot, n_free, ring_size, mask, n_tx_pkts = 0;
	uint16_t src_len, src_off, dst_len, dst_off, cp_len;
	memif_ring_type_t type = mq->type;
	memif_desc_t *d0;
	struct rte_mbuf *mbuf;
	struct rte_mbuf *mbuf_head;
	uint64_t a;
	ssize_t size;
	struct rte_eth_link link;

	if (unlikely((pmd->flags & ETH_MEMIF_FLAG_CONNECTED) == 0))
		return 0;
	if (unlikely(ring == NULL)) {
		/* Secondary process will attempt to request regions. */
		rte_eth_link_get(mq->in_port, &link);
		return 0;
	}

	ring_size = 1 << mq->log2_ring_size;
	mask = ring_size - 1;

	/* Reclaim slots the peer has consumed since our last pass. */
	n_free = ring->tail - mq->last_tail;
	mq->last_tail += n_free;
	slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;

	/* Free slots available for producing; wrap-safe uint16_t math. */
	if (type == MEMIF_RING_S2M)
		n_free = ring_size - ring->head + mq->last_tail;
	else
		n_free = ring->head - ring->tail;

	while (n_tx_pkts < nb_pkts && n_free) {
		mbuf_head = *bufs++;
		mbuf = mbuf_head;

		/* Remember the first slot so a partial packet can be
		 * rolled back if we run out of ring space mid-chain.
		 */
		saved_slot = slot;
		d0 = &ring->desc[slot & mask];
		dst_off = 0;
		/* S2M producer uses full-size buffers; M2S producer is
		 * limited to the size the consumer put in the descriptor.
		 */
		dst_len = (type == MEMIF_RING_S2M) ?
			pmd->run.pkt_buffer_size : d0->length;

next_in_chain:
		src_off = 0;
		src_len = rte_pktmbuf_data_len(mbuf);

		while (src_len) {
			if (dst_len == 0) {
				if (n_free) {
					/* Continue the frame in the next
					 * slot, marking the chain.
					 */
					slot++;
					n_free--;
					d0->flags |= MEMIF_DESC_FLAG_NEXT;
					d0 = &ring->desc[slot & mask];
					dst_off = 0;
					dst_len = (type == MEMIF_RING_S2M) ?
					    pmd->run.pkt_buffer_size : d0->length;
					d0->flags = 0;
				} else {
					/* Out of slots: drop the partial
					 * packet and stop producing.
					 */
					slot = saved_slot;
					goto no_free_slots;
				}
			}
			cp_len = RTE_MIN(dst_len, src_len);

			memcpy((uint8_t *)memif_get_buffer(proc_private, d0) + dst_off,
			       rte_pktmbuf_mtod_offset(mbuf, void *, src_off),
			       cp_len);

			mq->n_bytes += cp_len;
			src_off += cp_len;
			dst_off += cp_len;
			src_len -= cp_len;
			dst_len -= cp_len;

			d0->length = dst_off;
		}

		/* Multi-segment mbuf: copy the next segment into the ring. */
		if (rte_pktmbuf_is_contiguous(mbuf) == 0) {
			mbuf = mbuf->next;
			goto next_in_chain;
		}

		n_tx_pkts++;
		slot++;
		n_free--;
		rte_pktmbuf_free(mbuf_head);
	}

no_free_slots:
	/* Barrier: payload copies must be visible before publishing slots. */
	rte_mb();
	if (type == MEMIF_RING_S2M)
		ring->head = slot;
	else
		ring->tail = slot;

	/* Kick the peer unless it masked interrupts (polling mode). */
	if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0) {
		a = 1;
		size = write(mq->intr_handle.fd, &a, sizeof(a));
		if (unlikely(size < 0)) {
			MIF_LOG(WARNING,
				"Failed to send interrupt. %s", strerror(errno));
		}
	}

	mq->n_pkts += n_tx_pkts;
	return n_tx_pkts;
}
487
488 void
489 memif_free_regions(struct pmd_process_private *proc_private)
490 {
491         int i;
492         struct memif_region *r;
493
494         MIF_LOG(DEBUG, "Free memory regions");
495         /* regions are allocated contiguously, so it's
496          * enough to loop until 'proc_private->regions_num'
497          */
498         for (i = 0; i < proc_private->regions_num; i++) {
499                 r = proc_private->regions[i];
500                 if (r != NULL) {
501                         if (r->addr != NULL) {
502                                 munmap(r->addr, r->region_size);
503                                 if (r->fd > 0) {
504                                         close(r->fd);
505                                         r->fd = -1;
506                                 }
507                         }
508                         rte_free(r);
509                         proc_private->regions[i] = NULL;
510                 }
511         }
512         proc_private->regions_num = 0;
513 }
514
515 static int
516 memif_region_init_shm(struct rte_eth_dev *dev, uint8_t has_buffers)
517 {
518         struct pmd_internals *pmd = dev->data->dev_private;
519         struct pmd_process_private *proc_private = dev->process_private;
520         char shm_name[ETH_MEMIF_SHM_NAME_SIZE];
521         int ret = 0;
522         struct memif_region *r;
523
524         if (proc_private->regions_num >= ETH_MEMIF_MAX_REGION_NUM) {
525                 MIF_LOG(ERR, "Too many regions.");
526                 return -1;
527         }
528
529         r = rte_zmalloc("region", sizeof(struct memif_region), 0);
530         if (r == NULL) {
531                 MIF_LOG(ERR, "Failed to alloc memif region.");
532                 return -ENOMEM;
533         }
534
535         /* calculate buffer offset */
536         r->pkt_buffer_offset = (pmd->run.num_s2m_rings + pmd->run.num_m2s_rings) *
537             (sizeof(memif_ring_t) + sizeof(memif_desc_t) *
538             (1 << pmd->run.log2_ring_size));
539
540         r->region_size = r->pkt_buffer_offset;
541         /* if region has buffers, add buffers size to region_size */
542         if (has_buffers == 1)
543                 r->region_size += (uint32_t)(pmd->run.pkt_buffer_size *
544                         (1 << pmd->run.log2_ring_size) *
545                         (pmd->run.num_s2m_rings +
546                          pmd->run.num_m2s_rings));
547
548         memset(shm_name, 0, sizeof(char) * ETH_MEMIF_SHM_NAME_SIZE);
549         snprintf(shm_name, ETH_MEMIF_SHM_NAME_SIZE, "memif_region_%d",
550                  proc_private->regions_num);
551
552         r->fd = memfd_create(shm_name, MFD_ALLOW_SEALING);
553         if (r->fd < 0) {
554                 MIF_LOG(ERR, "Failed to create shm file: %s.", strerror(errno));
555                 ret = -1;
556                 goto error;
557         }
558
559         ret = fcntl(r->fd, F_ADD_SEALS, F_SEAL_SHRINK);
560         if (ret < 0) {
561                 MIF_LOG(ERR, "Failed to add seals to shm file: %s.", strerror(errno));
562                 goto error;
563         }
564
565         ret = ftruncate(r->fd, r->region_size);
566         if (ret < 0) {
567                 MIF_LOG(ERR, "Failed to truncate shm file: %s.", strerror(errno));
568                 goto error;
569         }
570
571         r->addr = mmap(NULL, r->region_size, PROT_READ |
572                        PROT_WRITE, MAP_SHARED, r->fd, 0);
573         if (r->addr == MAP_FAILED) {
574                 MIF_LOG(ERR, "Failed to mmap shm region: %s.", strerror(ret));
575                 ret = -1;
576                 goto error;
577         }
578
579         proc_private->regions[proc_private->regions_num] = r;
580         proc_private->regions_num++;
581
582         return ret;
583
584 error:
585         if (r->fd > 0)
586                 close(r->fd);
587         r->fd = -1;
588
589         return ret;
590 }
591
/* Allocate the single shared region that carries both rings and buffers. */
static int
memif_regions_init(struct rte_eth_dev *dev)
{
	/* create one buffer region */
	int err = memif_region_init_shm(dev, /* has buffer */ 1);

	return err < 0 ? err : 0;
}
604
/* Initialize every ring in region 0: reset head/tail, stamp the cookie
 * used to validate the ring on connect, and point each descriptor at its
 * fixed packet buffer slot after the ring area.
 */
static void
memif_init_rings(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct pmd_process_private *proc_private = dev->process_private;
	memif_ring_t *ring;
	int i, j;
	uint16_t slot;

	for (i = 0; i < pmd->run.num_s2m_rings; i++) {
		ring = memif_get_ring(pmd, proc_private, MEMIF_RING_S2M, i);
		ring->head = 0;
		ring->tail = 0;
		ring->cookie = MEMIF_COOKIE;
		ring->flags = 0;
		for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
			/* global buffer slot index across all s2m rings */
			slot = i * (1 << pmd->run.log2_ring_size) + j;
			ring->desc[j].region = 0;
			ring->desc[j].offset =
				proc_private->regions[0]->pkt_buffer_offset +
				(uint32_t)(slot * pmd->run.pkt_buffer_size);
			ring->desc[j].length = pmd->run.pkt_buffer_size;
		}
	}

	for (i = 0; i < pmd->run.num_m2s_rings; i++) {
		ring = memif_get_ring(pmd, proc_private, MEMIF_RING_M2S, i);
		ring->head = 0;
		ring->tail = 0;
		ring->cookie = MEMIF_COOKIE;
		ring->flags = 0;
		for (j = 0; j < (1 << pmd->run.log2_ring_size); j++) {
			/* m2s buffers follow the s2m buffers */
			slot = (i + pmd->run.num_s2m_rings) *
			    (1 << pmd->run.log2_ring_size) + j;
			ring->desc[j].region = 0;
			ring->desc[j].offset =
				proc_private->regions[0]->pkt_buffer_offset +
				(uint32_t)(slot * pmd->run.pkt_buffer_size);
			ring->desc[j].length = pmd->run.pkt_buffer_size;
		}
	}
}
647
/* called only by slave */
/* Bind each tx/rx queue to its ring in region 0 (recording the ring
 * offset), reset its cursors, and create a non-blocking eventfd used as
 * the per-queue interrupt channel. eventfd failure is non-fatal: the
 * queue falls back to operating without an interrupt fd.
 */
static void
memif_init_queues(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	struct memif_queue *mq;
	int i;

	/* slave tx queues are the s2m rings */
	for (i = 0; i < pmd->run.num_s2m_rings; i++) {
		mq = dev->data->tx_queues[i];
		mq->log2_ring_size = pmd->run.log2_ring_size;
		/* queues located only in region 0 */
		mq->region = 0;
		mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_S2M, i);
		mq->last_head = 0;
		mq->last_tail = 0;
		mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
		if (mq->intr_handle.fd < 0) {
			MIF_LOG(WARNING,
				"Failed to create eventfd for tx queue %d: %s.", i,
				strerror(errno));
		}
	}

	/* slave rx queues are the m2s rings */
	for (i = 0; i < pmd->run.num_m2s_rings; i++) {
		mq = dev->data->rx_queues[i];
		mq->log2_ring_size = pmd->run.log2_ring_size;
		/* queues located only in region 0 */
		mq->region = 0;
		mq->ring_offset = memif_get_ring_offset(dev, mq, MEMIF_RING_M2S, i);
		mq->last_head = 0;
		mq->last_tail = 0;
		mq->intr_handle.fd = eventfd(0, EFD_NONBLOCK);
		if (mq->intr_handle.fd < 0) {
			MIF_LOG(WARNING,
				"Failed to create eventfd for rx queue %d: %s.", i,
				strerror(errno));
		}
	}
}
688
/* Allocate the shared region, then lay out rings and bind queues in it. */
int
memif_init_regions_and_queues(struct rte_eth_dev *dev)
{
	int err;

	/* the region must exist before rings/queues can be placed in it */
	err = memif_regions_init(dev);
	if (err < 0)
		return err;

	memif_init_rings(dev);
	memif_init_queues(dev);

	return 0;
}
704
705 int
706 memif_connect(struct rte_eth_dev *dev)
707 {
708         struct pmd_internals *pmd = dev->data->dev_private;
709         struct pmd_process_private *proc_private = dev->process_private;
710         struct memif_region *mr;
711         struct memif_queue *mq;
712         memif_ring_t *ring;
713         int i;
714
715         for (i = 0; i < proc_private->regions_num; i++) {
716                 mr = proc_private->regions[i];
717                 if (mr != NULL) {
718                         if (mr->addr == NULL) {
719                                 if (mr->fd < 0)
720                                         return -1;
721                                 mr->addr = mmap(NULL, mr->region_size,
722                                                 PROT_READ | PROT_WRITE,
723                                                 MAP_SHARED, mr->fd, 0);
724                                 if (mr->addr == NULL)
725                                         return -1;
726                         }
727                 }
728         }
729
730         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
731                 for (i = 0; i < pmd->run.num_s2m_rings; i++) {
732                         mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
733                             dev->data->tx_queues[i] : dev->data->rx_queues[i];
734                         ring = memif_get_ring_from_queue(proc_private, mq);
735                         if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
736                                 MIF_LOG(ERR, "Wrong ring");
737                                 return -1;
738                         }
739                         ring->head = 0;
740                         ring->tail = 0;
741                         mq->last_head = 0;
742                         mq->last_tail = 0;
743                         /* enable polling mode */
744                         if (pmd->role == MEMIF_ROLE_MASTER)
745                                 ring->flags = MEMIF_RING_FLAG_MASK_INT;
746                 }
747                 for (i = 0; i < pmd->run.num_m2s_rings; i++) {
748                         mq = (pmd->role == MEMIF_ROLE_SLAVE) ?
749                             dev->data->rx_queues[i] : dev->data->tx_queues[i];
750                         ring = memif_get_ring_from_queue(proc_private, mq);
751                         if (ring == NULL || ring->cookie != MEMIF_COOKIE) {
752                                 MIF_LOG(ERR, "Wrong ring");
753                                 return -1;
754                         }
755                         ring->head = 0;
756                         ring->tail = 0;
757                         mq->last_head = 0;
758                         mq->last_tail = 0;
759                         /* enable polling mode */
760                         if (pmd->role == MEMIF_ROLE_SLAVE)
761                                 ring->flags = MEMIF_RING_FLAG_MASK_INT;
762                 }
763
764                 pmd->flags &= ~ETH_MEMIF_FLAG_CONNECTING;
765                 pmd->flags |= ETH_MEMIF_FLAG_CONNECTED;
766                 dev->data->dev_link.link_status = ETH_LINK_UP;
767         }
768         MIF_LOG(INFO, "Connected.");
769         return 0;
770 }
771
772 static int
773 memif_dev_start(struct rte_eth_dev *dev)
774 {
775         struct pmd_internals *pmd = dev->data->dev_private;
776         int ret = 0;
777
778         switch (pmd->role) {
779         case MEMIF_ROLE_SLAVE:
780                 ret = memif_connect_slave(dev);
781                 break;
782         case MEMIF_ROLE_MASTER:
783                 ret = memif_connect_master(dev);
784                 break;
785         default:
786                 MIF_LOG(ERR, "%s: Unknown role: %d.",
787                         rte_vdev_device_name(pmd->vdev), pmd->role);
788                 ret = -1;
789                 break;
790         }
791
792         return ret;
793 }
794
/* ethdev dev_close callback: notify the peer, tear down the connection,
 * release all queues and the socket binding (primary only), and free the
 * per-process private state.
 */
static void
memif_dev_close(struct rte_eth_dev *dev)
{
	struct pmd_internals *pmd = dev->data->dev_private;
	int i;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		/* tell the peer we are going away before disconnecting */
		memif_msg_enq_disconnect(pmd->cc, "Device closed", 0);
		memif_disconnect(dev);

		for (i = 0; i < dev->data->nb_rx_queues; i++)
			(*dev->dev_ops->rx_queue_release)(dev->data->rx_queues[i]);
		for (i = 0; i < dev->data->nb_tx_queues; i++)
			(*dev->dev_ops->tx_queue_release)(dev->data->tx_queues[i]);

		memif_socket_remove_device(dev);
	} else {
		/* secondary only detaches its local mappings */
		memif_disconnect(dev);
	}

	rte_free(dev->process_private);
}
817
818 static int
819 memif_dev_configure(struct rte_eth_dev *dev)
820 {
821         struct pmd_internals *pmd = dev->data->dev_private;
822
823         /*
824          * SLAVE - TXQ
825          * MASTER - RXQ
826          */
827         pmd->cfg.num_s2m_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
828                                   dev->data->nb_tx_queues : dev->data->nb_rx_queues;
829
830         /*
831          * SLAVE - RXQ
832          * MASTER - TXQ
833          */
834         pmd->cfg.num_m2s_rings = (pmd->role == MEMIF_ROLE_SLAVE) ?
835                                   dev->data->nb_rx_queues : dev->data->nb_tx_queues;
836
837         return 0;
838 }
839
840 static int
841 memif_tx_queue_setup(struct rte_eth_dev *dev,
842                      uint16_t qid,
843                      uint16_t nb_tx_desc __rte_unused,
844                      unsigned int socket_id __rte_unused,
845                      const struct rte_eth_txconf *tx_conf __rte_unused)
846 {
847         struct pmd_internals *pmd = dev->data->dev_private;
848         struct memif_queue *mq;
849
850         mq = rte_zmalloc("tx-queue", sizeof(struct memif_queue), 0);
851         if (mq == NULL) {
852                 MIF_LOG(ERR, "%s: Failed to allocate tx queue id: %u",
853                         rte_vdev_device_name(pmd->vdev), qid);
854                 return -ENOMEM;
855         }
856
857         mq->type =
858             (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_S2M : MEMIF_RING_M2S;
859         mq->n_pkts = 0;
860         mq->n_bytes = 0;
861         mq->intr_handle.fd = -1;
862         mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
863         dev->data->tx_queues[qid] = mq;
864
865         return 0;
866 }
867
868 static int
869 memif_rx_queue_setup(struct rte_eth_dev *dev,
870                      uint16_t qid,
871                      uint16_t nb_rx_desc __rte_unused,
872                      unsigned int socket_id __rte_unused,
873                      const struct rte_eth_rxconf *rx_conf __rte_unused,
874                      struct rte_mempool *mb_pool)
875 {
876         struct pmd_internals *pmd = dev->data->dev_private;
877         struct memif_queue *mq;
878
879         mq = rte_zmalloc("rx-queue", sizeof(struct memif_queue), 0);
880         if (mq == NULL) {
881                 MIF_LOG(ERR, "%s: Failed to allocate rx queue id: %u",
882                         rte_vdev_device_name(pmd->vdev), qid);
883                 return -ENOMEM;
884         }
885
886         mq->type = (pmd->role == MEMIF_ROLE_SLAVE) ? MEMIF_RING_M2S : MEMIF_RING_S2M;
887         mq->n_pkts = 0;
888         mq->n_bytes = 0;
889         mq->intr_handle.fd = -1;
890         mq->intr_handle.type = RTE_INTR_HANDLE_EXT;
891         mq->mempool = mb_pool;
892         mq->in_port = dev->data->port_id;
893         dev->data->rx_queues[qid] = mq;
894
895         return 0;
896 }
897
static void
memif_queue_release(void *queue)
{
	/* rte_free() tolerates NULL, so no explicit guard is needed. */
	rte_free(queue);
}
908
909 static int
910 memif_link_update(struct rte_eth_dev *dev,
911                   int wait_to_complete __rte_unused)
912 {
913         struct pmd_process_private *proc_private;
914
915         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
916                 proc_private = dev->process_private;
917                 if (dev->data->dev_link.link_status == ETH_LINK_UP &&
918                                 proc_private->regions_num == 0) {
919                         memif_mp_request_regions(dev);
920                 } else if (dev->data->dev_link.link_status == ETH_LINK_DOWN &&
921                                 proc_private->regions_num > 0) {
922                         memif_free_regions(proc_private);
923                 }
924         }
925         return 0;
926 }
927
928 static int
929 memif_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
930 {
931         struct pmd_internals *pmd = dev->data->dev_private;
932         struct memif_queue *mq;
933         int i;
934         uint8_t tmp, nq;
935
936         stats->ipackets = 0;
937         stats->ibytes = 0;
938         stats->opackets = 0;
939         stats->obytes = 0;
940
941         tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_s2m_rings :
942             pmd->run.num_m2s_rings;
943         nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
944             RTE_ETHDEV_QUEUE_STAT_CNTRS;
945
946         /* RX stats */
947         for (i = 0; i < nq; i++) {
948                 mq = dev->data->rx_queues[i];
949                 stats->q_ipackets[i] = mq->n_pkts;
950                 stats->q_ibytes[i] = mq->n_bytes;
951                 stats->ipackets += mq->n_pkts;
952                 stats->ibytes += mq->n_bytes;
953         }
954
955         tmp = (pmd->role == MEMIF_ROLE_SLAVE) ? pmd->run.num_m2s_rings :
956             pmd->run.num_s2m_rings;
957         nq = (tmp < RTE_ETHDEV_QUEUE_STAT_CNTRS) ? tmp :
958             RTE_ETHDEV_QUEUE_STAT_CNTRS;
959
960         /* TX stats */
961         for (i = 0; i < nq; i++) {
962                 mq = dev->data->tx_queues[i];
963                 stats->q_opackets[i] = mq->n_pkts;
964                 stats->q_obytes[i] = mq->n_bytes;
965                 stats->opackets += mq->n_pkts;
966                 stats->obytes += mq->n_bytes;
967         }
968         return 0;
969 }
970
971 static int
972 memif_stats_reset(struct rte_eth_dev *dev)
973 {
974         struct pmd_internals *pmd = dev->data->dev_private;
975         int i;
976         struct memif_queue *mq;
977
978         for (i = 0; i < pmd->run.num_s2m_rings; i++) {
979                 mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->tx_queues[i] :
980                     dev->data->rx_queues[i];
981                 mq->n_pkts = 0;
982                 mq->n_bytes = 0;
983         }
984         for (i = 0; i < pmd->run.num_m2s_rings; i++) {
985                 mq = (pmd->role == MEMIF_ROLE_SLAVE) ? dev->data->rx_queues[i] :
986                     dev->data->tx_queues[i];
987                 mq->n_pkts = 0;
988                 mq->n_bytes = 0;
989         }
990
991         return 0;
992 }
993
994 static int
995 memif_rx_queue_intr_enable(struct rte_eth_dev *dev __rte_unused,
996                            uint16_t qid __rte_unused)
997 {
998         MIF_LOG(WARNING, "Interrupt mode not supported.");
999
1000         return -1;
1001 }
1002
1003 static int
1004 memif_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t qid __rte_unused)
1005 {
1006         struct pmd_internals *pmd __rte_unused = dev->data->dev_private;
1007
1008         return 0;
1009 }
1010
/* ethdev callback table for the memif PMD; the rx/tx burst functions are
 * installed directly on the rte_eth_dev in memif_create(). */
static const struct eth_dev_ops ops = {
	.dev_start = memif_dev_start,
	.dev_close = memif_dev_close,
	.dev_infos_get = memif_dev_info,
	.dev_configure = memif_dev_configure,
	.tx_queue_setup = memif_tx_queue_setup,
	.rx_queue_setup = memif_rx_queue_setup,
	.rx_queue_release = memif_queue_release,
	.tx_queue_release = memif_queue_release,
	.rx_queue_intr_enable = memif_rx_queue_intr_enable,
	.rx_queue_intr_disable = memif_rx_queue_intr_disable,
	.link_update = memif_link_update,
	.stats_get = memif_stats_get,
	.stats_reset = memif_stats_reset,
};
1026
1027 static int
1028 memif_create(struct rte_vdev_device *vdev, enum memif_role_t role,
1029              memif_interface_id_t id, uint32_t flags,
1030              const char *socket_filename,
1031              memif_log2_ring_size_t log2_ring_size,
1032              uint16_t pkt_buffer_size, const char *secret,
1033              struct rte_ether_addr *ether_addr)
1034 {
1035         int ret = 0;
1036         struct rte_eth_dev *eth_dev;
1037         struct rte_eth_dev_data *data;
1038         struct pmd_internals *pmd;
1039         struct pmd_process_private *process_private;
1040         const unsigned int numa_node = vdev->device.numa_node;
1041         const char *name = rte_vdev_device_name(vdev);
1042
1043         if (flags & ETH_MEMIF_FLAG_ZERO_COPY) {
1044                 MIF_LOG(ERR, "Zero-copy slave not supported.");
1045                 return -1;
1046         }
1047
1048         eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*pmd));
1049         if (eth_dev == NULL) {
1050                 MIF_LOG(ERR, "%s: Unable to allocate device struct.", name);
1051                 return -1;
1052         }
1053
1054         process_private = (struct pmd_process_private *)
1055                 rte_zmalloc(name, sizeof(struct pmd_process_private),
1056                             RTE_CACHE_LINE_SIZE);
1057
1058         if (process_private == NULL) {
1059                 MIF_LOG(ERR, "Failed to alloc memory for process private");
1060                 return -1;
1061         }
1062         eth_dev->process_private = process_private;
1063
1064         pmd = eth_dev->data->dev_private;
1065         memset(pmd, 0, sizeof(*pmd));
1066
1067         pmd->id = id;
1068         pmd->flags = flags;
1069         pmd->flags |= ETH_MEMIF_FLAG_DISABLED;
1070         pmd->role = role;
1071
1072         ret = memif_socket_init(eth_dev, socket_filename);
1073         if (ret < 0)
1074                 return ret;
1075
1076         memset(pmd->secret, 0, sizeof(char) * ETH_MEMIF_SECRET_SIZE);
1077         if (secret != NULL)
1078                 strlcpy(pmd->secret, secret, sizeof(pmd->secret));
1079
1080         pmd->cfg.log2_ring_size = log2_ring_size;
1081         /* set in .dev_configure() */
1082         pmd->cfg.num_s2m_rings = 0;
1083         pmd->cfg.num_m2s_rings = 0;
1084
1085         pmd->cfg.pkt_buffer_size = pkt_buffer_size;
1086
1087         data = eth_dev->data;
1088         data->dev_private = pmd;
1089         data->numa_node = numa_node;
1090         data->mac_addrs = ether_addr;
1091
1092         eth_dev->dev_ops = &ops;
1093         eth_dev->device = &vdev->device;
1094         eth_dev->rx_pkt_burst = eth_memif_rx;
1095         eth_dev->tx_pkt_burst = eth_memif_tx;
1096
1097         eth_dev->data->dev_flags &= RTE_ETH_DEV_CLOSE_REMOVE;
1098
1099         rte_eth_dev_probing_finish(eth_dev);
1100
1101         return 0;
1102 }
1103
1104 static int
1105 memif_set_role(const char *key __rte_unused, const char *value,
1106                void *extra_args)
1107 {
1108         enum memif_role_t *role = (enum memif_role_t *)extra_args;
1109
1110         if (strstr(value, "master") != NULL) {
1111                 *role = MEMIF_ROLE_MASTER;
1112         } else if (strstr(value, "slave") != NULL) {
1113                 *role = MEMIF_ROLE_SLAVE;
1114         } else {
1115                 MIF_LOG(ERR, "Unknown role: %s.", value);
1116                 return -EINVAL;
1117         }
1118         return 0;
1119 }
1120
1121 static int
1122 memif_set_zc(const char *key __rte_unused, const char *value, void *extra_args)
1123 {
1124         uint32_t *flags = (uint32_t *)extra_args;
1125
1126         if (strstr(value, "yes") != NULL) {
1127                 *flags |= ETH_MEMIF_FLAG_ZERO_COPY;
1128         } else if (strstr(value, "no") != NULL) {
1129                 *flags &= ~ETH_MEMIF_FLAG_ZERO_COPY;
1130         } else {
1131                 MIF_LOG(ERR, "Failed to parse zero-copy param: %s.", value);
1132                 return -EINVAL;
1133         }
1134         return 0;
1135 }
1136
1137 static int
1138 memif_set_id(const char *key __rte_unused, const char *value, void *extra_args)
1139 {
1140         memif_interface_id_t *id = (memif_interface_id_t *)extra_args;
1141
1142         /* even if parsing fails, 0 is a valid id */
1143         *id = strtoul(value, NULL, 10);
1144         return 0;
1145 }
1146
1147 static int
1148 memif_set_bs(const char *key __rte_unused, const char *value, void *extra_args)
1149 {
1150         unsigned long tmp;
1151         uint16_t *pkt_buffer_size = (uint16_t *)extra_args;
1152
1153         tmp = strtoul(value, NULL, 10);
1154         if (tmp == 0 || tmp > 0xFFFF) {
1155                 MIF_LOG(ERR, "Invalid buffer size: %s.", value);
1156                 return -EINVAL;
1157         }
1158         *pkt_buffer_size = tmp;
1159         return 0;
1160 }
1161
1162 static int
1163 memif_set_rs(const char *key __rte_unused, const char *value, void *extra_args)
1164 {
1165         unsigned long tmp;
1166         memif_log2_ring_size_t *log2_ring_size =
1167             (memif_log2_ring_size_t *)extra_args;
1168
1169         tmp = strtoul(value, NULL, 10);
1170         if (tmp == 0 || tmp > ETH_MEMIF_MAX_LOG2_RING_SIZE) {
1171                 MIF_LOG(ERR, "Invalid ring size: %s (max %u).",
1172                         value, ETH_MEMIF_MAX_LOG2_RING_SIZE);
1173                 return -EINVAL;
1174         }
1175         *log2_ring_size = tmp;
1176         return 0;
1177 }
1178
1179 /* check if directory exists and if we have permission to read/write */
1180 static int
1181 memif_check_socket_filename(const char *filename)
1182 {
1183         char *dir = NULL, *tmp;
1184         uint32_t idx;
1185         int ret = 0;
1186
1187         tmp = strrchr(filename, '/');
1188         if (tmp != NULL) {
1189                 idx = tmp - filename;
1190                 dir = rte_zmalloc("memif_tmp", sizeof(char) * (idx + 1), 0);
1191                 if (dir == NULL) {
1192                         MIF_LOG(ERR, "Failed to allocate memory.");
1193                         return -1;
1194                 }
1195                 strlcpy(dir, filename, sizeof(char) * (idx + 1));
1196         }
1197
1198         if (dir == NULL || (faccessat(-1, dir, F_OK | R_OK |
1199                                         W_OK, AT_EACCESS) < 0)) {
1200                 MIF_LOG(ERR, "Invalid socket directory.");
1201                 ret = -EINVAL;
1202         }
1203
1204         if (dir != NULL)
1205                 rte_free(dir);
1206
1207         return ret;
1208 }
1209
1210 static int
1211 memif_set_socket_filename(const char *key __rte_unused, const char *value,
1212                           void *extra_args)
1213 {
1214         const char **socket_filename = (const char **)extra_args;
1215
1216         *socket_filename = value;
1217         return memif_check_socket_filename(*socket_filename);
1218 }
1219
1220 static int
1221 memif_set_mac(const char *key __rte_unused, const char *value, void *extra_args)
1222 {
1223         struct rte_ether_addr *ether_addr = (struct rte_ether_addr *)extra_args;
1224
1225         if (rte_ether_unformat_addr(value, ether_addr) < 0)
1226                 MIF_LOG(WARNING, "Failed to parse mac '%s'.", value);
1227         return 0;
1228 }
1229
1230 static int
1231 memif_set_secret(const char *key __rte_unused, const char *value, void *extra_args)
1232 {
1233         const char **secret = (const char **)extra_args;
1234
1235         *secret = value;
1236         return 0;
1237 }
1238
1239 static int
1240 rte_pmd_memif_probe(struct rte_vdev_device *vdev)
1241 {
1242         RTE_BUILD_BUG_ON(sizeof(memif_msg_t) != 128);
1243         RTE_BUILD_BUG_ON(sizeof(memif_desc_t) != 16);
1244         int ret = 0;
1245         struct rte_kvargs *kvlist;
1246         const char *name = rte_vdev_device_name(vdev);
1247         enum memif_role_t role = MEMIF_ROLE_SLAVE;
1248         memif_interface_id_t id = 0;
1249         uint16_t pkt_buffer_size = ETH_MEMIF_DEFAULT_PKT_BUFFER_SIZE;
1250         memif_log2_ring_size_t log2_ring_size = ETH_MEMIF_DEFAULT_RING_SIZE;
1251         const char *socket_filename = ETH_MEMIF_DEFAULT_SOCKET_FILENAME;
1252         uint32_t flags = 0;
1253         const char *secret = NULL;
1254         struct rte_ether_addr *ether_addr = rte_zmalloc("",
1255                 sizeof(struct rte_ether_addr), 0);
1256         struct rte_eth_dev *eth_dev;
1257
1258         rte_eth_random_addr(ether_addr->addr_bytes);
1259
1260         MIF_LOG(INFO, "Initialize MEMIF: %s.", name);
1261
1262         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1263                 eth_dev = rte_eth_dev_attach_secondary(name);
1264                 if (!eth_dev) {
1265                         MIF_LOG(ERR, "Failed to probe %s", name);
1266                         return -1;
1267                 }
1268
1269                 eth_dev->dev_ops = &ops;
1270                 eth_dev->device = &vdev->device;
1271                 eth_dev->rx_pkt_burst = eth_memif_rx;
1272                 eth_dev->tx_pkt_burst = eth_memif_tx;
1273
1274                 if (!rte_eal_primary_proc_alive(NULL)) {
1275                         MIF_LOG(ERR, "Primary process is missing");
1276                         return -1;
1277                 }
1278
1279                 eth_dev->process_private = (struct pmd_process_private *)
1280                         rte_zmalloc(name,
1281                                 sizeof(struct pmd_process_private),
1282                                 RTE_CACHE_LINE_SIZE);
1283                 if (eth_dev->process_private == NULL) {
1284                         MIF_LOG(ERR,
1285                                 "Failed to alloc memory for process private");
1286                         return -1;
1287                 }
1288
1289                 rte_eth_dev_probing_finish(eth_dev);
1290
1291                 return 0;
1292         }
1293
1294         ret = rte_mp_action_register(MEMIF_MP_SEND_REGION, memif_mp_send_region);
1295         /*
1296          * Primary process can continue probing, but secondary process won't
1297          * be able to get memory regions information
1298          */
1299         if (ret < 0 && rte_errno != EEXIST)
1300                 MIF_LOG(WARNING, "Failed to register mp action callback: %s",
1301                         strerror(rte_errno));
1302
1303         kvlist = rte_kvargs_parse(rte_vdev_device_args(vdev), valid_arguments);
1304
1305         /* parse parameters */
1306         if (kvlist != NULL) {
1307                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_ROLE_ARG,
1308                                          &memif_set_role, &role);
1309                 if (ret < 0)
1310                         goto exit;
1311                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_ID_ARG,
1312                                          &memif_set_id, &id);
1313                 if (ret < 0)
1314                         goto exit;
1315                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_PKT_BUFFER_SIZE_ARG,
1316                                          &memif_set_bs, &pkt_buffer_size);
1317                 if (ret < 0)
1318                         goto exit;
1319                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_RING_SIZE_ARG,
1320                                          &memif_set_rs, &log2_ring_size);
1321                 if (ret < 0)
1322                         goto exit;
1323                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_SOCKET_ARG,
1324                                          &memif_set_socket_filename,
1325                                          (void *)(&socket_filename));
1326                 if (ret < 0)
1327                         goto exit;
1328                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_MAC_ARG,
1329                                          &memif_set_mac, ether_addr);
1330                 if (ret < 0)
1331                         goto exit;
1332                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_ZC_ARG,
1333                                          &memif_set_zc, &flags);
1334                 if (ret < 0)
1335                         goto exit;
1336                 ret = rte_kvargs_process(kvlist, ETH_MEMIF_SECRET_ARG,
1337                                          &memif_set_secret, (void *)(&secret));
1338                 if (ret < 0)
1339                         goto exit;
1340         }
1341
1342         /* create interface */
1343         ret = memif_create(vdev, role, id, flags, socket_filename,
1344                            log2_ring_size, pkt_buffer_size, secret, ether_addr);
1345
1346 exit:
1347         if (kvlist != NULL)
1348                 rte_kvargs_free(kvlist);
1349         return ret;
1350 }
1351
1352 static int
1353 rte_pmd_memif_remove(struct rte_vdev_device *vdev)
1354 {
1355         struct rte_eth_dev *eth_dev;
1356
1357         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
1358         if (eth_dev == NULL)
1359                 return 0;
1360
1361         rte_eth_dev_close(eth_dev->data->port_id);
1362
1363         return 0;
1364 }
1365
/* Driver hooks registered with the vdev bus below. */
static struct rte_vdev_driver pmd_memif_drv = {
	.probe = rte_pmd_memif_probe,
	.remove = rte_pmd_memif_remove,
};
1370
RTE_PMD_REGISTER_VDEV(net_memif, pmd_memif_drv);

/* Devargs accepted by this driver; each value is parsed by the
 * corresponding memif_set_* kvargs handler above. */
RTE_PMD_REGISTER_PARAM_STRING(net_memif,
			      ETH_MEMIF_ID_ARG "=<int>"
			      ETH_MEMIF_ROLE_ARG "=master|slave"
			      ETH_MEMIF_PKT_BUFFER_SIZE_ARG "=<int>"
			      ETH_MEMIF_RING_SIZE_ARG "=<int>"
			      ETH_MEMIF_SOCKET_ARG "=<string>"
			      ETH_MEMIF_MAC_ARG "=xx:xx:xx:xx:xx:xx"
			      ETH_MEMIF_ZC_ARG "=yes|no"
			      ETH_MEMIF_SECRET_ARG "=<string>");
1382
/* Log type backing MIF_LOG(); non-static — presumably declared in
 * rte_eth_memif.h for use by memif_socket.c as well (verify). */
int memif_logtype;

/* Constructor: register the log type before EAL init completes. */
RTE_INIT(memif_init_log)
{
	memif_logtype = rte_log_register("pmd.net.memif");
	if (memif_logtype >= 0)
		rte_log_set_level(memif_logtype, RTE_LOG_NOTICE);
}