drivers/net/af_xdp/rte_eth_af_xdp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Intel Corporation.
3  */
4 #include <unistd.h>
5 #include <errno.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <netinet/in.h>
9 #include <net/if.h>
10 #include <sys/socket.h>
11 #include <sys/ioctl.h>
12 #include <linux/if_ether.h>
13 #include <linux/if_xdp.h>
14 #include <linux/if_link.h>
15 #include <linux/ethtool.h>
16 #include <linux/sockios.h>
17 #include "af_xdp_deps.h"
18
19 #include <rte_ethdev.h>
20 #include <ethdev_driver.h>
21 #include <ethdev_vdev.h>
22 #include <rte_kvargs.h>
23 #include <rte_bus_vdev.h>
24 #include <rte_string_fns.h>
25 #include <rte_branch_prediction.h>
26 #include <rte_common.h>
27 #include <rte_dev.h>
28 #include <rte_eal.h>
29 #include <rte_ether.h>
30 #include <rte_lcore.h>
31 #include <rte_log.h>
32 #include <rte_memory.h>
33 #include <rte_memzone.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 #include <rte_malloc.h>
37 #include <rte_ring.h>
38 #include <rte_spinlock.h>
39 #include <rte_power_intrinsics.h>
40
41 #include "compat.h"
42
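/* Fallback definitions of the busy-poll socket options (added in Linux 5.11)
 * so the PMD still builds against older kernel headers.
 */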
43 #ifndef SO_PREFER_BUSY_POLL
44 #define SO_PREFER_BUSY_POLL 69
45 #endif
46 #ifndef SO_BUSY_POLL_BUDGET
47 #define SO_BUSY_POLL_BUDGET 70
48 #endif
49
50
51 #ifndef SOL_XDP
52 #define SOL_XDP 283
53 #endif
54
55 #ifndef AF_XDP
56 #define AF_XDP 44
57 #endif
58
59 #ifndef PF_XDP
60 #define PF_XDP AF_XDP
61 #endif
62
63 RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
64
65 #define AF_XDP_LOG(level, fmt, args...)                 \
66         rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
67                 "%s(): " fmt, __func__, ##args)
68
69 #define ETH_AF_XDP_FRAME_SIZE           2048
70 #define ETH_AF_XDP_NUM_BUFFERS          4096
71 #define ETH_AF_XDP_DFLT_NUM_DESCS       XSK_RING_CONS__DEFAULT_NUM_DESCS
72 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
73 #define ETH_AF_XDP_DFLT_QUEUE_COUNT     1
74 #define ETH_AF_XDP_DFLT_BUSY_BUDGET     64
75 #define ETH_AF_XDP_DFLT_BUSY_TIMEOUT    20
76
77 #define ETH_AF_XDP_RX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
78 #define ETH_AF_XDP_TX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
79
80 #define ETH_AF_XDP_ETH_OVERHEAD         (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
81
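/* Key for the multi-process IPC action used to share xsk file descriptors
 * with secondary processes (see struct ipc_hdr below).
 */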
82 #define ETH_AF_XDP_MP_KEY "afxdp_mp_send_fds"
83
84 static int afxdp_dev_count;
85
86 /* Message header to synchronize fds via IPC */
87 struct ipc_hdr {
88         char port_name[RTE_DEV_NAME_MAX_LEN];
89         /* The file descriptors are carried in the ancillary data of the
90          * Unix domain message (SCM_RIGHTS) and translated by the kernel.
91          */
92 };
93
94 struct xsk_umem_info {
95         struct xsk_umem *umem;
96         struct rte_ring *buf_ring;
97         const struct rte_memzone *mz;
98         struct rte_mempool *mb_pool;
99         void *buffer;
100         uint8_t refcnt;
101         uint32_t max_xsks;
102 };
103
104 struct rx_stats {
105         uint64_t rx_pkts;
106         uint64_t rx_bytes;
107         uint64_t rx_dropped;
108 };
109
110 struct pkt_rx_queue {
111         struct xsk_ring_cons rx;
112         struct xsk_umem_info *umem;
113         struct xsk_socket *xsk;
114         struct rte_mempool *mb_pool;
115
116         struct rx_stats stats;
117
118         struct xsk_ring_prod fq;
119         struct xsk_ring_cons cq;
120
121         struct pkt_tx_queue *pair;
122         struct pollfd fds[1];
123         int xsk_queue_idx;
124         int busy_budget;
125 };
126
127 struct tx_stats {
128         uint64_t tx_pkts;
129         uint64_t tx_bytes;
130         uint64_t tx_dropped;
131 };
132
133 struct pkt_tx_queue {
134         struct xsk_ring_prod tx;
135         struct xsk_umem_info *umem;
136
137         struct tx_stats stats;
138
139         struct pkt_rx_queue *pair;
140         int xsk_queue_idx;
141 };
142
143 struct pmd_internals {
144         int if_index;
145         char if_name[IFNAMSIZ];
146         int start_queue_idx;
147         int queue_cnt;
148         int max_queue_cnt;
149         int combined_queue_cnt;
150         bool shared_umem;
151         char prog_path[PATH_MAX];
152         bool custom_prog_configured;
153         struct bpf_map *map;
154
155         struct rte_ether_addr eth_addr;
156
157         struct pkt_rx_queue *rx_queues;
158         struct pkt_tx_queue *tx_queues;
159 };
160
161 struct pmd_process_private {
162         int rxq_xsk_fds[RTE_MAX_QUEUES_PER_PORT];
163 };
164
165 #define ETH_AF_XDP_IFACE_ARG                    "iface"
166 #define ETH_AF_XDP_START_QUEUE_ARG              "start_queue"
167 #define ETH_AF_XDP_QUEUE_COUNT_ARG              "queue_count"
168 #define ETH_AF_XDP_SHARED_UMEM_ARG              "shared_umem"
169 #define ETH_AF_XDP_PROG_ARG                     "xdp_prog"
170 #define ETH_AF_XDP_BUDGET_ARG                   "busy_budget"
171
172 static const char * const valid_arguments[] = {
173         ETH_AF_XDP_IFACE_ARG,
174         ETH_AF_XDP_START_QUEUE_ARG,
175         ETH_AF_XDP_QUEUE_COUNT_ARG,
176         ETH_AF_XDP_SHARED_UMEM_ARG,
177         ETH_AF_XDP_PROG_ARG,
178         ETH_AF_XDP_BUDGET_ARG,
179         NULL
180 };
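/* Example devargs string using the keys above (interface name illustrative):
 *   --vdev=net_af_xdp0,iface=eth0,start_queue=0,queue_count=1,busy_budget=64
 */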
181
182 static const struct rte_eth_link pmd_link = {
183         .link_speed = RTE_ETH_SPEED_NUM_10G,
184         .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
185         .link_status = RTE_ETH_LINK_DOWN,
186         .link_autoneg = RTE_ETH_LINK_AUTONEG
187 };
188
189 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
190 struct internal_list {
191         TAILQ_ENTRY(internal_list) next;
192         struct rte_eth_dev *eth_dev;
193 };
194
195 TAILQ_HEAD(internal_list_head, internal_list);
196 static struct internal_list_head internal_list =
197         TAILQ_HEAD_INITIALIZER(internal_list);
198
199 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
200
201 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
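/* Zero-copy fill path: the address posted to the fill ring is the offset of
 * the mbuf's underlying mempool object within the UMEM area, so the kernel
 * (honouring the configured frame headroom) writes packet data straight into
 * the mbuf data room. On reserve failure the mbufs are returned to the pool.
 */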
202 static inline int
203 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
204                       struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
205 {
206         uint32_t idx;
207         uint16_t i;
208
209         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
210                 for (i = 0; i < reserve_size; i++)
211                         rte_pktmbuf_free(bufs[i]);
212                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
213                 return -1;
214         }
215
216         for (i = 0; i < reserve_size; i++) {
217                 __u64 *fq_addr;
218                 uint64_t addr;
219
220                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
221                 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
222                                 umem->mb_pool->header_size;
223                 *fq_addr = addr;
224         }
225
226         xsk_ring_prod__submit(fq, reserve_size);
227
228         return 0;
229 }
230 #else
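/* Copy-mode fill path: fill addresses are fixed-size frame offsets dequeued
 * from the UMEM buf_ring rather than derived from mbufs.
 */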
231 static inline int
232 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
233                       struct rte_mbuf **bufs __rte_unused,
234                       struct xsk_ring_prod *fq)
235 {
236         void *addrs[reserve_size];
237         uint32_t idx;
238         uint16_t i;
239
240         if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
241                     != reserve_size) {
242                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
243                 return -1;
244         }
245
246         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
247                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
248                 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
249                                 reserve_size, NULL);
250                 return -1;
251         }
252
253         for (i = 0; i < reserve_size; i++) {
254                 __u64 *fq_addr;
255
256                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
257                 *fq_addr = (uint64_t)addrs[i];
258         }
259
260         xsk_ring_prod__submit(fq, reserve_size);
261
262         return 0;
263 }
264 #endif
265
266 static inline int
267 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
268                    struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
269 {
270 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
271         return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
272 #else
273         return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
274 #endif
275 }
276
277 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
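/* Zero-copy RX: completed descriptors are translated back into the mbufs
 * backing the UMEM chunks, and the fill ring is replenished with freshly
 * allocated mbufs.
 */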
278 static uint16_t
279 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
280 {
281         struct pkt_rx_queue *rxq = queue;
282         struct xsk_ring_cons *rx = &rxq->rx;
283         struct xsk_ring_prod *fq = &rxq->fq;
284         struct xsk_umem_info *umem = rxq->umem;
285         uint32_t idx_rx = 0;
286         unsigned long rx_bytes = 0;
287         int i;
288         struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
289
290         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
291
292         if (nb_pkts == 0) {
293                 /* we can assume a kernel >= 5.11 is in use if busy polling is
294                  * enabled and thus we can safely use the recvfrom() syscall
295                  * which is only supported for AF_XDP sockets in kernels >=
296                  * 5.11.
297                  */
298                 if (rxq->busy_budget) {
299                         (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
300                                        MSG_DONTWAIT, NULL, NULL);
301                 } else if (xsk_ring_prod__needs_wakeup(fq)) {
302                         (void)poll(&rxq->fds[0], 1, 1000);
303                 }
304
305                 return 0;
306         }
307
308         /* allocate bufs for fill queue replenishment after rx */
309         if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
310                 AF_XDP_LOG(DEBUG,
311                         "Failed to get enough buffers for fq.\n");
312                 /* rollback cached_cons which is added by
313                  * xsk_ring_cons__peek
314                  */
315                 rx->cached_cons -= nb_pkts;
316                 return 0;
317         }
318
319         for (i = 0; i < nb_pkts; i++) {
320                 const struct xdp_desc *desc;
321                 uint64_t addr;
322                 uint32_t len;
323                 uint64_t offset;
324
325                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
326                 addr = desc->addr;
327                 len = desc->len;
328
329                 offset = xsk_umem__extract_offset(addr);
330                 addr = xsk_umem__extract_addr(addr);
331
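                /* The mbuf structure lives in the UMEM just before the packet
                 * data; rebuild data_off from the kernel-reported offset by
                 * subtracting the mbuf header, private area and mempool
                 * object header sizes.
                 */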
332                 bufs[i] = (struct rte_mbuf *)
333                                 xsk_umem__get_data(umem->buffer, addr +
334                                         umem->mb_pool->header_size);
335                 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
336                         rte_pktmbuf_priv_size(umem->mb_pool) -
337                         umem->mb_pool->header_size;
338
339                 rte_pktmbuf_pkt_len(bufs[i]) = len;
340                 rte_pktmbuf_data_len(bufs[i]) = len;
341                 rx_bytes += len;
342         }
343
344         xsk_ring_cons__release(rx, nb_pkts);
345         (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
346
347         /* statistics */
348         rxq->stats.rx_pkts += nb_pkts;
349         rxq->stats.rx_bytes += rx_bytes;
350
351         return nb_pkts;
352 }
353 #else
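/* Copy-mode RX: packet data is copied from the UMEM frame into an mbuf from
 * the queue's mempool and the frame offset is returned to the buf_ring.
 */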
354 static uint16_t
355 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
356 {
357         struct pkt_rx_queue *rxq = queue;
358         struct xsk_ring_cons *rx = &rxq->rx;
359         struct xsk_umem_info *umem = rxq->umem;
360         struct xsk_ring_prod *fq = &rxq->fq;
361         uint32_t idx_rx = 0;
362         unsigned long rx_bytes = 0;
363         int i;
364         uint32_t free_thresh = fq->size >> 1;
365         struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
366
367         if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
368                 (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
369
370         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
371         if (nb_pkts == 0) {
372 #if defined(XDP_USE_NEED_WAKEUP)
373                 if (xsk_ring_prod__needs_wakeup(fq))
374                         (void)poll(rxq->fds, 1, 1000);
375 #endif
376                 return 0;
377         }
378
379         if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
380                 /* rollback cached_cons which is added by
381                  * xsk_ring_cons__peek
382                  */
383                 rx->cached_cons -= nb_pkts;
384                 return 0;
385         }
386
387         for (i = 0; i < nb_pkts; i++) {
388                 const struct xdp_desc *desc;
389                 uint64_t addr;
390                 uint32_t len;
391                 void *pkt;
392
393                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
394                 addr = desc->addr;
395                 len = desc->len;
396                 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
397
398                 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
399                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
400                 rte_pktmbuf_pkt_len(mbufs[i]) = len;
401                 rte_pktmbuf_data_len(mbufs[i]) = len;
402                 rx_bytes += len;
403                 bufs[i] = mbufs[i];
404         }
405
406         xsk_ring_cons__release(rx, nb_pkts);
407
408         /* statistics */
409         rxq->stats.rx_pkts += nb_pkts;
410         rxq->stats.rx_bytes += rx_bytes;
411
412         return nb_pkts;
413 }
414 #endif
415
416 static uint16_t
417 af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
418 {
419 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
420         return af_xdp_rx_zc(queue, bufs, nb_pkts);
421 #else
422         return af_xdp_rx_cp(queue, bufs, nb_pkts);
423 #endif
424 }
425
426 static uint16_t
427 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
428 {
429         uint16_t nb_rx;
430
431         if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
432                 return af_xdp_rx(queue, bufs, nb_pkts);
433
434         /* Split larger batch into smaller batches of size
435          * ETH_AF_XDP_RX_BATCH_SIZE or less.
436          */
437         nb_rx = 0;
438         while (nb_pkts) {
439                 uint16_t ret, n;
440
441                 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
442                 ret = af_xdp_rx(queue, &bufs[nb_rx], n);
443                 nb_rx = (uint16_t)(nb_rx + ret);
444                 nb_pkts = (uint16_t)(nb_pkts - ret);
445                 if (ret < n)
446                         break;
447         }
448
449         return nb_rx;
450 }
451
452 static void
453 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
454 {
455         size_t i, n;
456         uint32_t idx_cq = 0;
457
458         n = xsk_ring_cons__peek(cq, size, &idx_cq);
459
460         for (i = 0; i < n; i++) {
461                 uint64_t addr;
462                 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
463 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
464                 addr = xsk_umem__extract_addr(addr);
465                 rte_pktmbuf_free((struct rte_mbuf *)
466                                         xsk_umem__get_data(umem->buffer,
467                                         addr + umem->mb_pool->header_size));
468 #else
469                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
470 #endif
471         }
472
473         xsk_ring_cons__release(cq, n);
474 }
475
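/* Drain the completion ring and, when a TX kick is required, issue send(),
 * retrying while errno is EBUSY, EAGAIN or EINTR and pulling the completion
 * ring again on EAGAIN to free up space.
 */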
476 static void
477 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
478 {
479         struct xsk_umem_info *umem = txq->umem;
480
481         pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
482
483         if (tx_syscall_needed(&txq->tx))
484                 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
485                             0, MSG_DONTWAIT) < 0) {
486                         /* something unexpected */
487                         if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
488                                 break;
489
490                         /* pull from completion queue to leave more space */
491                         if (errno == EAGAIN)
492                                 pull_umem_cq(umem,
493                                              XSK_RING_CONS__DEFAULT_NUM_DESCS,
494                                              cq);
495                 }
496 }
497
498 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
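/* Zero-copy TX: mbufs that already belong to the UMEM mempool are posted by
 * reference; mbufs from other pools are first copied into a freshly
 * allocated UMEM mbuf.
 */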
499 static uint16_t
500 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
501 {
502         struct pkt_tx_queue *txq = queue;
503         struct xsk_umem_info *umem = txq->umem;
504         struct rte_mbuf *mbuf;
505         unsigned long tx_bytes = 0;
506         int i;
507         uint32_t idx_tx;
508         uint16_t count = 0;
509         struct xdp_desc *desc;
510         uint64_t addr, offset;
511         struct xsk_ring_cons *cq = &txq->pair->cq;
512         uint32_t free_thresh = cq->size >> 1;
513
514         if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
515                 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
516
517         for (i = 0; i < nb_pkts; i++) {
518                 mbuf = bufs[i];
519
520                 if (mbuf->pool == umem->mb_pool) {
521                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
522                                 kick_tx(txq, cq);
523                                 if (!xsk_ring_prod__reserve(&txq->tx, 1,
524                                                             &idx_tx))
525                                         goto out;
526                         }
527                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
528                         desc->len = mbuf->pkt_len;
529                         addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
530                                         umem->mb_pool->header_size;
531                         offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
532                                         (uint64_t)mbuf +
533                                         umem->mb_pool->header_size;
534                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
535                         desc->addr = addr | offset;
536                         count++;
537                 } else {
538                         struct rte_mbuf *local_mbuf =
539                                         rte_pktmbuf_alloc(umem->mb_pool);
540                         void *pkt;
541
542                         if (local_mbuf == NULL)
543                                 goto out;
544
545                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
546                                 rte_pktmbuf_free(local_mbuf);
547                                 goto out;
548                         }
549
550                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
551                         desc->len = mbuf->pkt_len;
552
553                         addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
554                                         umem->mb_pool->header_size;
555                         offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
556                                         (uint64_t)local_mbuf +
557                                         umem->mb_pool->header_size;
558                         pkt = xsk_umem__get_data(umem->buffer, addr + offset);
559                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
560                         desc->addr = addr | offset;
561                         rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
562                                         desc->len);
563                         rte_pktmbuf_free(mbuf);
564                         count++;
565                 }
566
567                 tx_bytes += mbuf->pkt_len;
568         }
569
570 out:
571         xsk_ring_prod__submit(&txq->tx, count);
572         kick_tx(txq, cq);
573
574         txq->stats.tx_pkts += count;
575         txq->stats.tx_bytes += tx_bytes;
576         txq->stats.tx_dropped += nb_pkts - count;
577
578         return count;
579 }
580 #else
581 static uint16_t
582 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
583 {
584         struct pkt_tx_queue *txq = queue;
585         struct xsk_umem_info *umem = txq->umem;
586         struct rte_mbuf *mbuf;
587         void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
588         unsigned long tx_bytes = 0;
589         int i;
590         uint32_t idx_tx;
591         struct xsk_ring_cons *cq = &txq->pair->cq;
592
593         pull_umem_cq(umem, nb_pkts, cq);
594
595         nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
596                                         nb_pkts, NULL);
597         if (nb_pkts == 0)
598                 return 0;
599
600         if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
601                 kick_tx(txq, cq);
602                 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
603                 return 0;
604         }
605
606         for (i = 0; i < nb_pkts; i++) {
607                 struct xdp_desc *desc;
608                 void *pkt;
609
610                 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
611                 mbuf = bufs[i];
612                 desc->len = mbuf->pkt_len;
613
614                 desc->addr = (uint64_t)addrs[i];
615                 pkt = xsk_umem__get_data(umem->mz->addr,
616                                          desc->addr);
617                 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
618                 tx_bytes += mbuf->pkt_len;
619                 rte_pktmbuf_free(mbuf);
620         }
621
622         xsk_ring_prod__submit(&txq->tx, nb_pkts);
623
624         kick_tx(txq, cq);
625
626         txq->stats.tx_pkts += nb_pkts;
627         txq->stats.tx_bytes += tx_bytes;
628
629         return nb_pkts;
630 }
631
632 static uint16_t
633 af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
634 {
635         uint16_t nb_tx;
636
637         if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
638                 return af_xdp_tx_cp(queue, bufs, nb_pkts);
639
640         nb_tx = 0;
641         while (nb_pkts) {
642                 uint16_t ret, n;
643
644                 /* Split larger batch into smaller batches of size
645                  * ETH_AF_XDP_TX_BATCH_SIZE or less.
646                  */
647                 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
648                 ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
649                 nb_tx = (uint16_t)(nb_tx + ret);
650                 nb_pkts = (uint16_t)(nb_pkts - ret);
651                 if (ret < n)
652                         break;
653         }
654
655         return nb_tx;
656 }
657 #endif
658
659 static uint16_t
660 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
661 {
662 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
663         return af_xdp_tx_zc(queue, bufs, nb_pkts);
664 #else
665         return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
666 #endif
667 }
668
669 static int
670 eth_dev_start(struct rte_eth_dev *dev)
671 {
672         dev->data->dev_link.link_status = RTE_ETH_LINK_UP;
673
674         return 0;
675 }
676
677 /* This function gets called when the current port gets stopped. */
678 static int
679 eth_dev_stop(struct rte_eth_dev *dev)
680 {
681         dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
682         return 0;
683 }
684
685 /* Find ethdev in list */
686 static inline struct internal_list *
687 find_internal_resource(struct pmd_internals *port_int)
688 {
689         int found = 0;
690         struct internal_list *list = NULL;
691
692         if (port_int == NULL)
693                 return NULL;
694
695         pthread_mutex_lock(&internal_list_lock);
696
697         TAILQ_FOREACH(list, &internal_list, next) {
698                 struct pmd_internals *list_int =
699                                 list->eth_dev->data->dev_private;
700                 if (list_int == port_int) {
701                         found = 1;
702                         break;
703                 }
704         }
705
706         pthread_mutex_unlock(&internal_list_lock);
707
708         if (!found)
709                 return NULL;
710
711         return list;
712 }
713
714 static int
715 eth_dev_configure(struct rte_eth_dev *dev)
716 {
717         struct pmd_internals *internal = dev->data->dev_private;
718
719         /* rx/tx must be paired */
720         if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
721                 return -EINVAL;
722
723         if (internal->shared_umem) {
724                 struct internal_list *list = NULL;
725                 const char *name = dev->device->name;
726
727                 /* Ensure PMD is not already inserted into the list */
728                 list = find_internal_resource(internal);
729                 if (list)
730                         return 0;
731
732                 list = rte_zmalloc_socket(name, sizeof(*list), 0,
733                                         dev->device->numa_node);
734                 if (list == NULL)
735                         return -1;
736
737                 list->eth_dev = dev;
738                 pthread_mutex_lock(&internal_list_lock);
739                 TAILQ_INSERT_TAIL(&internal_list, list, next);
740                 pthread_mutex_unlock(&internal_list_lock);
741         }
742
743         return 0;
744 }
745
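/* Power-monitor support: eth_get_monitor_addr() points the monitor at the RX
 * ring producer index and eth_monitor_callback() aborts entering the power
 * optimized state once that index changes, i.e. new descriptors arrived.
 */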
746 #define CLB_VAL_IDX 0
747 static int
748 eth_monitor_callback(const uint64_t value,
749                 const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
750 {
751         const uint64_t v = opaque[CLB_VAL_IDX];
752         const uint64_t m = (uint32_t)~0;
753
754         /* if the value has changed, abort entering power optimized state */
755         return (value & m) == v ? 0 : -1;
756 }
757
758 static int
759 eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
760 {
761         struct pkt_rx_queue *rxq = rx_queue;
762         unsigned int *prod = rxq->rx.producer;
763         const uint32_t cur_val = rxq->rx.cached_prod; /* use cached value */
764
765         /* watch for changes in producer ring */
766         pmc->addr = (void *)prod;
767
768         /* store current value */
769         pmc->opaque[CLB_VAL_IDX] = cur_val;
770         pmc->fn = eth_monitor_callback;
771
772         /* AF_XDP producer ring index is 32-bit */
773         pmc->size = sizeof(uint32_t);
774
775         return 0;
776 }
777
778 static int
779 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
780 {
781         struct pmd_internals *internals = dev->data->dev_private;
782
783         dev_info->if_index = internals->if_index;
784         dev_info->max_mac_addrs = 1;
785         dev_info->max_rx_queues = internals->queue_cnt;
786         dev_info->max_tx_queues = internals->queue_cnt;
787
788         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
789 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
790         dev_info->max_rx_pktlen = getpagesize() -
791                                   sizeof(struct rte_mempool_objhdr) -
792                                   sizeof(struct rte_mbuf) -
793                                   RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
794 #else
795         dev_info->max_rx_pktlen = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
796 #endif
797         dev_info->max_mtu = dev_info->max_rx_pktlen - ETH_AF_XDP_ETH_OVERHEAD;
798
799         dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
800         dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
801         dev_info->default_rxportconf.nb_queues = 1;
802         dev_info->default_txportconf.nb_queues = 1;
803         dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
804         dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
805
806         return 0;
807 }
808
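/* Aggregate the per-queue software counters and add kernel-side drops,
 * retrieved via the XDP_STATISTICS socket option, to imissed.
 */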
809 static int
810 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
811 {
812         struct pmd_internals *internals = dev->data->dev_private;
813         struct pmd_process_private *process_private = dev->process_private;
814         struct xdp_statistics xdp_stats;
815         struct pkt_rx_queue *rxq;
816         struct pkt_tx_queue *txq;
817         socklen_t optlen;
818         int i, ret, fd;
819
820         for (i = 0; i < dev->data->nb_rx_queues; i++) {
821                 optlen = sizeof(struct xdp_statistics);
822                 rxq = &internals->rx_queues[i];
823                 txq = rxq->pair;
824                 stats->q_ipackets[i] = rxq->stats.rx_pkts;
825                 stats->q_ibytes[i] = rxq->stats.rx_bytes;
826
827                 stats->q_opackets[i] = txq->stats.tx_pkts;
828                 stats->q_obytes[i] = txq->stats.tx_bytes;
829
830                 stats->ipackets += stats->q_ipackets[i];
831                 stats->ibytes += stats->q_ibytes[i];
832                 stats->imissed += rxq->stats.rx_dropped;
833                 stats->oerrors += txq->stats.tx_dropped;
834                 fd = process_private->rxq_xsk_fds[i];
835                 ret = fd >= 0 ? getsockopt(fd, SOL_XDP, XDP_STATISTICS,
836                                            &xdp_stats, &optlen) : -1;
837                 if (ret != 0) {
838                         AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
839                         return -1;
840                 }
841                 stats->imissed += xdp_stats.rx_dropped;
842
843                 stats->opackets += stats->q_opackets[i];
844                 stats->obytes += stats->q_obytes[i];
845         }
846
847         return 0;
848 }
849
850 static int
851 eth_stats_reset(struct rte_eth_dev *dev)
852 {
853         struct pmd_internals *internals = dev->data->dev_private;
854         int i;
855
856         for (i = 0; i < internals->queue_cnt; i++) {
857                 memset(&internals->rx_queues[i].stats, 0,
858                                         sizeof(struct rx_stats));
859                 memset(&internals->tx_queues[i].stats, 0,
860                                         sizeof(struct tx_stats));
861         }
862
863         return 0;
864 }
865
866 static void
867 remove_xdp_program(struct pmd_internals *internals)
868 {
869         uint32_t curr_prog_id = 0;
870
871         if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
872                                 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
873                 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
874                 return;
875         }
876         bpf_set_link_xdp_fd(internals->if_index, -1,
877                         XDP_FLAGS_UPDATE_IF_NOEXIST);
878 }
879
880 static void
881 xdp_umem_destroy(struct xsk_umem_info *umem)
882 {
883 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
884         umem->mb_pool = NULL;
885 #else
886         rte_memzone_free(umem->mz);
887         umem->mz = NULL;
888
889         rte_ring_free(umem->buf_ring);
890         umem->buf_ring = NULL;
891 #endif
892
893         rte_free(umem);
894 }
895
896 static int
897 eth_dev_close(struct rte_eth_dev *dev)
898 {
899         struct pmd_internals *internals = dev->data->dev_private;
900         struct pkt_rx_queue *rxq;
901         int i;
902
903         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
904                 goto out;
905
906         AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
907                 rte_socket_id());
908
909         for (i = 0; i < internals->queue_cnt; i++) {
910                 rxq = &internals->rx_queues[i];
911                 if (rxq->umem == NULL)
912                         break;
913                 xsk_socket__delete(rxq->xsk);
914
915                 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
916                                 == 0) {
917                         (void)xsk_umem__delete(rxq->umem->umem);
918                         xdp_umem_destroy(rxq->umem);
919                 }
920
921                 /* free pkt_tx_queue */
922                 rte_free(rxq->pair);
923                 rte_free(rxq);
924         }
925
926         /*
927          * The MAC address is not allocated dynamically; set it to NULL so
928          * that rte_eth_dev_release_port() does not attempt to free it.
929          */
930         dev->data->mac_addrs = NULL;
931
932         remove_xdp_program(internals);
933
934         if (internals->shared_umem) {
935                 struct internal_list *list;
936
937                 /* Remove ethdev from list used to track and share UMEMs */
938                 list = find_internal_resource(internals);
939                 if (list) {
940                         pthread_mutex_lock(&internal_list_lock);
941                         TAILQ_REMOVE(&internal_list, list, next);
942                         pthread_mutex_unlock(&internal_list_lock);
943                         rte_free(list);
944                 }
945         }
946
947 out:
948         rte_free(dev->process_private);
949
950         return 0;
951 }
952
953 static int
954 eth_link_update(struct rte_eth_dev *dev __rte_unused,
955                 int wait_to_complete __rte_unused)
956 {
957         return 0;
958 }
959
960 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
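/* Return the mempool memory start rounded down to a page boundary; *align
 * receives the adjustment, which is later added to the UMEM size.
 */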
961 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
962 {
963         struct rte_mempool_memhdr *memhdr;
964         uintptr_t memhdr_addr, aligned_addr;
965
966         memhdr = STAILQ_FIRST(&mp->mem_list);
967         memhdr_addr = (uintptr_t)memhdr->addr;
968         aligned_addr = memhdr_addr & ~(getpagesize() - 1);
969         *align = memhdr_addr - aligned_addr;
970
971         return aligned_addr;
972 }
973
974 /* Check if the netdev,qid context already exists */
975 static inline bool
976 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
977                 struct pkt_rx_queue *list_rxq, const char *list_ifname)
978 {
979         bool exists = false;
980
981         if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
982                         !strncmp(ifname, list_ifname, IFNAMSIZ)) {
983                 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
984                                         ifname, rxq->xsk_queue_idx);
985                 exists = true;
986         }
987
988         return exists;
989 }
990
991 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
992 static inline int
993 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
994                         struct xsk_umem_info **umem)
995 {
996         struct internal_list *list;
997         struct pmd_internals *internals;
998         int i = 0, ret = 0;
999         struct rte_mempool *mb_pool = rxq->mb_pool;
1000
1001         if (mb_pool == NULL)
1002                 return ret;
1003
1004         pthread_mutex_lock(&internal_list_lock);
1005
1006         TAILQ_FOREACH(list, &internal_list, next) {
1007                 internals = list->eth_dev->data->dev_private;
1008                 for (i = 0; i < internals->queue_cnt; i++) {
1009                         struct pkt_rx_queue *list_rxq =
1010                                                 &internals->rx_queues[i];
1011                         if (rxq == list_rxq)
1012                                 continue;
1013                         if (mb_pool == internals->rx_queues[i].mb_pool) {
1014                                 if (ctx_exists(rxq, ifname, list_rxq,
1015                                                 internals->if_name)) {
1016                                         ret = -1;
1017                                         goto out;
1018                                 }
1019                                 if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
1020                                                     __ATOMIC_ACQUIRE)) {
1021                                         *umem = internals->rx_queues[i].umem;
1022                                         goto out;
1023                                 }
1024                         }
1025                 }
1026         }
1027
1028 out:
1029         pthread_mutex_unlock(&internal_list_lock);
1030
1031         return ret;
1032 }
1033
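/* Create (or, with shared_umem, reuse) a UMEM that overlays the rx mempool:
 * frame_size is the full mempool object size and frame_headroom skips the
 * object header, mbuf structure, private area and mbuf headroom, so each
 * UMEM chunk maps 1:1 onto a mempool object.
 */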
1034 static struct
1035 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1036                                   struct pkt_rx_queue *rxq)
1037 {
1038         struct xsk_umem_info *umem = NULL;
1039         int ret;
1040         struct xsk_umem_config usr_config = {
1041                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
1042                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1043                 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
1044         void *base_addr = NULL;
1045         struct rte_mempool *mb_pool = rxq->mb_pool;
1046         uint64_t umem_size, align = 0;
1047
1048         if (internals->shared_umem) {
1049                 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
1050                         return NULL;
1051
1052                 if (umem != NULL &&
1053                         __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
1054                                         umem->max_xsks) {
1055                         AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
1056                                         internals->if_name, rxq->xsk_queue_idx);
1057                         __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
1058                 }
1059         }
1060
1061         if (umem == NULL) {
1062                 usr_config.frame_size =
1063                         rte_mempool_calc_obj_size(mb_pool->elt_size,
1064                                                   mb_pool->flags, NULL);
1065                 usr_config.frame_headroom = mb_pool->header_size +
1066                                                 sizeof(struct rte_mbuf) +
1067                                                 rte_pktmbuf_priv_size(mb_pool) +
1068                                                 RTE_PKTMBUF_HEADROOM;
1069
1070                 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
1071                                           rte_socket_id());
1072                 if (umem == NULL) {
1073                         AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1074                         return NULL;
1075                 }
1076
1077                 umem->mb_pool = mb_pool;
1078                 base_addr = (void *)get_base_addr(mb_pool, &align);
1079                 umem_size = (uint64_t)mb_pool->populated_size *
1080                                 (uint64_t)usr_config.frame_size +
1081                                 align;
1082
1083                 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
1084                                 &rxq->fq, &rxq->cq, &usr_config);
1085                 if (ret) {
1086                         AF_XDP_LOG(ERR, "Failed to create umem\n");
1087                         goto err;
1088                 }
1089                 umem->buffer = base_addr;
1090
1091                 if (internals->shared_umem) {
1092                         umem->max_xsks = mb_pool->populated_size /
1093                                                 ETH_AF_XDP_NUM_BUFFERS;
1094                         AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
1095                                                 mb_pool->name, umem->max_xsks);
1096                 }
1097
1098                 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
1099         }
1100
1101         return umem;
1102
1103 err:
1104         xdp_umem_destroy(umem);
1105         return NULL;
1106 }
1107 #else
1108 static struct
1109 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1110                                   struct pkt_rx_queue *rxq)
1111 {
1112         struct xsk_umem_info *umem;
1113         const struct rte_memzone *mz;
1114         struct xsk_umem_config usr_config = {
1115                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1116                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1117                 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1118                 .frame_headroom = 0 };
1119         char ring_name[RTE_RING_NAMESIZE];
1120         char mz_name[RTE_MEMZONE_NAMESIZE];
1121         int ret;
1122         uint64_t i;
1123
1124         umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1125         if (umem == NULL) {
1126                 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1127                 return NULL;
1128         }
1129
1130         snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1131                        internals->if_name, rxq->xsk_queue_idx);
1132         umem->buf_ring = rte_ring_create(ring_name,
1133                                          ETH_AF_XDP_NUM_BUFFERS,
1134                                          rte_socket_id(),
1135                                          0x0);
1136         if (umem->buf_ring == NULL) {
1137                 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1138                 goto err;
1139         }
1140
1141         for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1142                 rte_ring_enqueue(umem->buf_ring,
1143                                  (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1144
1145         snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1146                        internals->if_name, rxq->xsk_queue_idx);
1147         mz = rte_memzone_reserve_aligned(mz_name,
1148                         ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1149                         rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1150                         getpagesize());
1151         if (mz == NULL) {
1152                 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1153                 goto err;
1154         }
1155
1156         ret = xsk_umem__create(&umem->umem, mz->addr,
1157                                ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1158                                &rxq->fq, &rxq->cq,
1159                                &usr_config);
1160
1161         if (ret) {
1162                 AF_XDP_LOG(ERR, "Failed to create umem\n");
1163                 goto err;
1164         }
1165         umem->mz = mz;
1166
1167         return umem;
1168
1169 err:
1170         xdp_umem_destroy(umem);
1171         return NULL;
1172 }
1173 #endif
1174
1175 static int
1176 load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
1177 {
1178         int ret, prog_fd;
1179         struct bpf_object *obj;
1180
1181         prog_fd = load_program(prog_path, &obj);
1182         if (prog_fd < 0) {
1183                 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1184                 return -1;
1185         }
1186
1187         /*
1188          * The loaded program must provide a map of AF_XDP sockets (xsks_map)
1189          * so that traffic can be redirected to userspace.
1190          */
1191         *map = bpf_object__find_map_by_name(obj, "xsks_map");
1192         if (!*map) {
1193                 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1194                 return -1;
1195         }
1196
1197         /* Link the program with the given network device */
1198         ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1199                                         XDP_FLAGS_UPDATE_IF_NOEXIST);
1200         if (ret) {
1201                 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1202                                 prog_fd);
1203                 return -1;
1204         }
1205
1206         AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1207                                 prog_path, prog_fd);
1208
1209         return 0;
1210 }
1211
1212 /* Detect support for busy polling through setsockopt(). */
1213 static int
1214 configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
1215 {
1216         int sock_opt = 1;
1217         int fd = xsk_socket__fd(rxq->xsk);
1218         int ret = 0;
1219
1220         ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1221                         (void *)&sock_opt, sizeof(sock_opt));
1222         if (ret < 0) {
1223                 AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
1224                 goto err_prefer;
1225         }
1226
1227         sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
1228         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1229                         sizeof(sock_opt));
1230         if (ret < 0) {
1231                 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
1232                 goto err_timeout;
1233         }
1234
1235         sock_opt = rxq->busy_budget;
1236         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
1237                         (void *)&sock_opt, sizeof(sock_opt));
1238         if (ret < 0) {
1239                 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
1240         } else {
1241                 AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
1242                                         rxq->busy_budget);
1243                 return 0;
1244         }
1245
1246         /* setsockopt failure - attempt to restore xsk to default state and
1247          * proceed without busy polling support.
1248          */
1249         sock_opt = 0;
1250         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1251                         sizeof(sock_opt));
1252         if (ret < 0) {
1253                 AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
1254                 return -1;
1255         }
1256
1257 err_timeout:
1258         sock_opt = 0;
1259         ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1260                         (void *)&sock_opt, sizeof(sock_opt));
1261         if (ret < 0) {
1262                 AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
1263                 return -1;
1264         }
1265
1266 err_prefer:
1267         rxq->busy_budget = 0;
1268         return 0;
1269 }
1270
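/* Bring up one xsk: attach the UMEM, pre-populate the fill ring (before
 * socket creation for a private UMEM, after it when the UMEM is shared),
 * create the socket, optionally load a custom XDP program and register the
 * socket in its xsks_map, then try to enable preferred busy polling.
 */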
1271 static int
1272 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1273               int ring_size)
1274 {
1275         struct xsk_socket_config cfg;
1276         struct pkt_tx_queue *txq = rxq->pair;
1277         int ret = 0;
1278         int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1279         struct rte_mbuf *fq_bufs[reserve_size];
1280         bool reserve_before;
1281
1282         rxq->umem = xdp_umem_configure(internals, rxq);
1283         if (rxq->umem == NULL)
1284                 return -ENOMEM;
1285         txq->umem = rxq->umem;
1286         reserve_before = __atomic_load_n(&rxq->umem->refcnt, __ATOMIC_ACQUIRE) <= 1;
1287
1288 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1289         ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1290         if (ret) {
1291                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1292                 goto out_umem;
1293         }
1294 #endif
1295
1296         /* reserve fill queue of queues not (yet) sharing UMEM */
1297         if (reserve_before) {
1298                 ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1299                 if (ret) {
1300                         AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1301                         goto out_umem;
1302                 }
1303         }
1304
1305         cfg.rx_size = ring_size;
1306         cfg.tx_size = ring_size;
1307         cfg.libbpf_flags = 0;
1308         cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1309         cfg.bind_flags = 0;
1310
1311 #if defined(XDP_USE_NEED_WAKEUP)
1312         cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1313 #endif
1314
1315         if (strnlen(internals->prog_path, PATH_MAX)) {
1316                 if (!internals->custom_prog_configured) {
1317                         ret = load_custom_xdp_prog(internals->prog_path,
1318                                                         internals->if_index,
1319                                                         &internals->map);
1320                         if (ret) {
1321                                 AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1322                                                 internals->prog_path);
1323                                 goto out_umem;
1324                         }
1325                         internals->custom_prog_configured = 1;
1326                 }
1327                 cfg.libbpf_flags |= XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
1328         }
1329
1330         if (internals->shared_umem)
1331                 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1332                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1333                                 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1334         else
1335                 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1336                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1337                                 &txq->tx, &cfg);
1338
1339         if (ret) {
1340                 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1341                 goto out_umem;
1342         }
1343
1344         if (!reserve_before) {
1345                 /* reserve fill queue of queues sharing UMEM */
1346                 ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1347                 if (ret) {
1348                         AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1349                         goto out_xsk;
1350                 }
1351         }
1352
1353         /* insert the xsk into the xsks_map */
1354         if (internals->custom_prog_configured) {
1355                 int err, fd;
1356
1357                 fd = xsk_socket__fd(rxq->xsk);
1358                 err = bpf_map_update_elem(bpf_map__fd(internals->map),
1359                                           &rxq->xsk_queue_idx, &fd, 0);
1360                 if (err) {
1361                         AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
1362                         goto out_xsk;
1363                 }
1364         }
1365
1366         if (rxq->busy_budget) {
1367                 ret = configure_preferred_busy_poll(rxq);
1368                 if (ret) {
1369                         AF_XDP_LOG(ERR, "Failed to configure busy polling.\n");
1370                         goto out_xsk;
1371                 }
1372         }
1373
1374         return 0;
1375
1376 out_xsk:
1377         xsk_socket__delete(rxq->xsk);
1378 out_umem:
1379         if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1380                 xdp_umem_destroy(rxq->umem);
1381
1382         return ret;
1383 }
1384
1385 static int
1386 eth_rx_queue_setup(struct rte_eth_dev *dev,
1387                    uint16_t rx_queue_id,
1388                    uint16_t nb_rx_desc,
1389                    unsigned int socket_id __rte_unused,
1390                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1391                    struct rte_mempool *mb_pool)
1392 {
1393         struct pmd_internals *internals = dev->data->dev_private;
1394         struct pmd_process_private *process_private = dev->process_private;
1395         struct pkt_rx_queue *rxq;
1396         int ret;
1397
1398         rxq = &internals->rx_queues[rx_queue_id];
1399
1400         AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1401                    rx_queue_id, rxq->xsk_queue_idx);
1402
1403 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1404         uint32_t buf_size, data_size;
1405
1406         /* Now get the space available for data in the mbuf */
1407         buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1408                 RTE_PKTMBUF_HEADROOM;
1409         data_size = ETH_AF_XDP_FRAME_SIZE;
1410
1411         if (data_size > buf_size) {
1412                 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1413                         dev->device->name, data_size, buf_size);
1414                 ret = -ENOMEM;
1415                 goto err;
1416         }
1417 #endif
1418
1419         rxq->mb_pool = mb_pool;
1420
1421         if (xsk_configure(internals, rxq, nb_rx_desc)) {
1422                 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1423                 ret = -EINVAL;
1424                 goto err;
1425         }
1426
1427         if (!rxq->busy_budget)
1428                 AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
1429
1430         rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1431         rxq->fds[0].events = POLLIN;
1432
1433         process_private->rxq_xsk_fds[rx_queue_id] = rxq->fds[0].fd;
1434
1435         dev->data->rx_queues[rx_queue_id] = rxq;
1436         return 0;
1437
1438 err:
1439         return ret;
1440 }
1441
1442 static int
1443 eth_tx_queue_setup(struct rte_eth_dev *dev,
1444                    uint16_t tx_queue_id,
1445                    uint16_t nb_tx_desc __rte_unused,
1446                    unsigned int socket_id __rte_unused,
1447                    const struct rte_eth_txconf *tx_conf __rte_unused)
1448 {
1449         struct pmd_internals *internals = dev->data->dev_private;
1450         struct pkt_tx_queue *txq;
1451
1452         txq = &internals->tx_queues[tx_queue_id];
1453
1454         dev->data->tx_queues[tx_queue_id] = txq;
1455         return 0;
1456 }
1457
1458 static int
1459 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1460 {
1461         struct pmd_internals *internals = dev->data->dev_private;
1462         struct ifreq ifr = { .ifr_mtu = mtu };
1463         int ret;
1464         int s;
1465
1466         s = socket(PF_INET, SOCK_DGRAM, 0);
1467         if (s < 0)
1468                 return -EINVAL;
1469
1470         strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1471         ret = ioctl(s, SIOCSIFMTU, &ifr);
1472         close(s);
1473
1474         return (ret < 0) ? -errno : 0;
1475 }
1476
1477 static int
1478 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1479 {
1480         struct ifreq ifr;
1481         int ret = 0;
1482         int s;
1483
1484         s = socket(PF_INET, SOCK_DGRAM, 0);
1485         if (s < 0)
1486                 return -errno;
1487
1488         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1489         if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1490                 ret = -errno;
1491                 goto out;
1492         }
1493         ifr.ifr_flags &= mask;
1494         ifr.ifr_flags |= flags;
1495         if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1496                 ret = -errno;
1497                 goto out;
1498         }
1499 out:
1500         close(s);
1501         return ret;
1502 }
1503
1504 static int
1505 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1506 {
1507         struct pmd_internals *internals = dev->data->dev_private;
1508
1509         return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1510 }
1511
1512 static int
1513 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1514 {
1515         struct pmd_internals *internals = dev->data->dev_private;
1516
1517         return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1518 }
1519
1520 static const struct eth_dev_ops ops = {
1521         .dev_start = eth_dev_start,
1522         .dev_stop = eth_dev_stop,
1523         .dev_close = eth_dev_close,
1524         .dev_configure = eth_dev_configure,
1525         .dev_infos_get = eth_dev_info,
1526         .mtu_set = eth_dev_mtu_set,
1527         .promiscuous_enable = eth_dev_promiscuous_enable,
1528         .promiscuous_disable = eth_dev_promiscuous_disable,
1529         .rx_queue_setup = eth_rx_queue_setup,
1530         .tx_queue_setup = eth_tx_queue_setup,
1531         .link_update = eth_link_update,
1532         .stats_get = eth_stats_get,
1533         .stats_reset = eth_stats_reset,
1534         .get_monitor_addr = eth_get_monitor_addr,
1535 };
1536
1537 /** parse busy_budget argument */
1538 static int
1539 parse_budget_arg(const char *key __rte_unused,
1540                   const char *value, void *extra_args)
1541 {
1542         int *i = (int *)extra_args;
1543         char *end;
1544
1545         *i = strtol(value, &end, 10);
1546         if (*i < 0 || *i > UINT16_MAX) {
1547                 AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
1548                                 *i, UINT16_MAX);
1549                 return -EINVAL;
1550         }
1551
1552         return 0;
1553 }
1554
1555 /** parse a non-negative integer argument */
1556 static int
1557 parse_integer_arg(const char *key __rte_unused,
1558                   const char *value, void *extra_args)
1559 {
1560         int *i = (int *)extra_args;
1561         char *end;
1562
1563         *i = strtol(value, &end, 10);
1564         if (*i < 0) {
1565                 AF_XDP_LOG(ERR, "Argument has to be non-negative.\n");
1566                 return -EINVAL;
1567         }
1568
1569         return 0;
1570 }
1571
1572 /** parse name argument */
1573 static int
1574 parse_name_arg(const char *key __rte_unused,
1575                const char *value, void *extra_args)
1576 {
1577         char *name = extra_args;
1578
1579         if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1580                 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1581                            value, IFNAMSIZ);
1582                 return -EINVAL;
1583         }
1584
1585         strlcpy(name, value, IFNAMSIZ);
1586
1587         return 0;
1588 }
1589
1590 /** parse xdp prog argument */
1591 static int
1592 parse_prog_arg(const char *key __rte_unused,
1593                const char *value, void *extra_args)
1594 {
1595         char *path = extra_args;
1596
1597         if (strnlen(value, PATH_MAX) == PATH_MAX) {
1598                 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1599                            value, PATH_MAX);
1600                 return -EINVAL;
1601         }
1602
1603         if (access(value, F_OK) != 0) {
1604                 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1605                            value, strerror(errno));
1606                 return -EINVAL;
1607         }
1608
1609         strlcpy(path, value, PATH_MAX);
1610
1611         return 0;
1612 }
1613
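/* Query the channel configuration of the interface via ETHTOOL_GCHANNELS.
 * Drivers that do not support the ioctl are treated as single-queue devices.
 */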
1614 static int
1615 xdp_get_channels_info(const char *if_name, int *max_queues,
1616                                 int *combined_queues)
1617 {
1618         struct ethtool_channels channels;
1619         struct ifreq ifr;
1620         int fd, ret;
1621
1622         fd = socket(AF_INET, SOCK_DGRAM, 0);
1623         if (fd < 0)
1624                 return -1;
1625
1626         channels.cmd = ETHTOOL_GCHANNELS;
1627         ifr.ifr_data = (void *)&channels;
1628         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1629         ret = ioctl(fd, SIOCETHTOOL, &ifr);
1630         if (ret) {
1631                 if (errno == EOPNOTSUPP) {
1632                         ret = 0;
1633                 } else {
1634                         ret = -errno;
1635                         goto out;
1636                 }
1637         }
1638
1639         if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1640                 /* If the device says it has no channels, then all traffic
1641                  * is sent to a single stream, so max queues = 1.
1642                  */
1643                 *max_queues = 1;
1644                 *combined_queues = 1;
1645         } else {
1646                 *max_queues = channels.max_combined;
1647                 *combined_queues = channels.combined_count;
1648         }
1649
1650  out:
1651         close(fd);
1652         return ret;
1653 }
1654
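/* Process all supported kvargs. The kvlist is always freed before returning,
 * including on error.
 */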
1655 static int
1656 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1657                         int *queue_cnt, int *shared_umem, char *prog_path,
1658                         int *busy_budget)
1659 {
1660         int ret;
1661
1662         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1663                                  &parse_name_arg, if_name);
1664         if (ret < 0)
1665                 goto free_kvlist;
1666
1667         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1668                                  &parse_integer_arg, start_queue);
1669         if (ret < 0)
1670                 goto free_kvlist;
1671
1672         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1673                                  &parse_integer_arg, queue_cnt);
1674         if (ret < 0 || *queue_cnt <= 0) {
1675                 ret = -EINVAL;
1676                 goto free_kvlist;
1677         }
1678
1679         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1680                                 &parse_integer_arg, shared_umem);
1681         if (ret < 0)
1682                 goto free_kvlist;
1683
1684         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1685                                  &parse_prog_arg, prog_path);
1686         if (ret < 0)
1687                 goto free_kvlist;
1688
1689         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
1690                                 &parse_budget_arg, busy_budget);
1691         if (ret < 0)
1692                 goto free_kvlist;
1693
1694 free_kvlist:
1695         rte_kvargs_free(kvlist);
1696         return ret;
1697 }
1698
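/* Retrieve the ifindex and MAC address of the interface via SIOCGIFINDEX and
 * SIOCGIFHWADDR.
 */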
1699 static int
1700 get_iface_info(const char *if_name,
1701                struct rte_ether_addr *eth_addr,
1702                int *if_index)
1703 {
1704         struct ifreq ifr;
1705         int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1706
1707         if (sock < 0)
1708                 return -1;
1709
1710         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1711         if (ioctl(sock, SIOCGIFINDEX, &ifr))
1712                 goto error;
1713
1714         *if_index = ifr.ifr_ifindex;
1715
1716         if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1717                 goto error;
1718
1719         rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1720
1721         close(sock);
1722         return 0;
1723
1724 error:
1725         close(sock);
1726         return -1;
1727 }
1728
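/* Allocate and initialise the PMD private data and the ethdev itself:
 * per-queue arrays are sized from queue_cnt and each Tx queue is paired with
 * the Rx queue of the same index.
 */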
1729 static struct rte_eth_dev *
1730 init_internals(struct rte_vdev_device *dev, const char *if_name,
1731                 int start_queue_idx, int queue_cnt, int shared_umem,
1732                 const char *prog_path, int busy_budget)
1733 {
1734         const char *name = rte_vdev_device_name(dev);
1735         const unsigned int numa_node = dev->device.numa_node;
1736         struct pmd_process_private *process_private;
1737         struct pmd_internals *internals;
1738         struct rte_eth_dev *eth_dev;
1739         int ret;
1740         int i;
1741
1742         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1743         if (internals == NULL)
1744                 return NULL;
1745
1746         internals->start_queue_idx = start_queue_idx;
1747         internals->queue_cnt = queue_cnt;
1748         strlcpy(internals->if_name, if_name, IFNAMSIZ);
1749         strlcpy(internals->prog_path, prog_path, PATH_MAX);
1750         internals->custom_prog_configured = 0;
1751
1752 #ifndef ETH_AF_XDP_SHARED_UMEM
1753         if (shared_umem) {
1754                 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1755                                 "Check kernel and libbpf version\n");
1756                 goto err_free_internals;
1757         }
1758 #endif
1759         internals->shared_umem = shared_umem;
1760
1761         if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1762                                   &internals->combined_queue_cnt)) {
1763                 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1764                                 if_name);
1765                 goto err_free_internals;
1766         }
1767
1768         if (queue_cnt > internals->combined_queue_cnt) {
1769                 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1770                                 queue_cnt, internals->combined_queue_cnt);
1771                 goto err_free_internals;
1772         }
1773
1774         internals->rx_queues = rte_zmalloc_socket(NULL,
1775                                         sizeof(struct pkt_rx_queue) * queue_cnt,
1776                                         0, numa_node);
1777         if (internals->rx_queues == NULL) {
1778                 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1779                 goto err_free_internals;
1780         }
1781
1782         internals->tx_queues = rte_zmalloc_socket(NULL,
1783                                         sizeof(struct pkt_tx_queue) * queue_cnt,
1784                                         0, numa_node);
1785         if (internals->tx_queues == NULL) {
1786                 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1787                 goto err_free_rx;
1788         }
1789         for (i = 0; i < queue_cnt; i++) {
1790                 internals->tx_queues[i].pair = &internals->rx_queues[i];
1791                 internals->rx_queues[i].pair = &internals->tx_queues[i];
1792                 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1793                 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1794                 internals->rx_queues[i].busy_budget = busy_budget;
1795         }
1796
1797         ret = get_iface_info(if_name, &internals->eth_addr,
1798                              &internals->if_index);
1799         if (ret)
1800                 goto err_free_tx;
1801
1802         process_private = (struct pmd_process_private *)
1803                 rte_zmalloc_socket(name, sizeof(struct pmd_process_private),
1804                                    RTE_CACHE_LINE_SIZE, numa_node);
1805         if (process_private == NULL) {
1806                 AF_XDP_LOG(ERR, "Failed to alloc memory for process private\n");
1807                 goto err_free_tx;
1808         }
1809
1810         eth_dev = rte_eth_vdev_allocate(dev, 0);
1811         if (eth_dev == NULL)
1812                 goto err_free_pp;
1813
1814         eth_dev->data->dev_private = internals;
1815         eth_dev->data->dev_link = pmd_link;
1816         eth_dev->data->mac_addrs = &internals->eth_addr;
1817         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1818         eth_dev->dev_ops = &ops;
1819         eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1820         eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1821         eth_dev->process_private = process_private;
1822
1823         for (i = 0; i < queue_cnt; i++)
1824                 process_private->rxq_xsk_fds[i] = -1;
1825
1826 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1827         AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1828 #endif
1829
1830         return eth_dev;
1831
1832 err_free_pp:
1833         rte_free(process_private);
1834 err_free_tx:
1835         rte_free(internals->tx_queues);
1836 err_free_rx:
1837         rte_free(internals->rx_queues);
1838 err_free_internals:
1839         rte_free(internals);
1840         return NULL;
1841 }
1842
1843 /* Secondary process requests rxq fds from primary. */
1844 static int
1845 afxdp_mp_request_fds(const char *name, struct rte_eth_dev *dev)
1846 {
1847         struct pmd_process_private *process_private = dev->process_private;
1848         struct timespec timeout = {.tv_sec = 1, .tv_nsec = 0};
1849         struct rte_mp_msg request, *reply;
1850         struct rte_mp_reply replies;
1851         struct ipc_hdr *request_param = (struct ipc_hdr *)request.param;
1852         int i, ret;
1853
1854         /* Prepare the request */
1855         memset(&request, 0, sizeof(request));
1856         strlcpy(request.name, ETH_AF_XDP_MP_KEY, sizeof(request.name));
1857         strlcpy(request_param->port_name, name,
1858                 sizeof(request_param->port_name));
1859         request.len_param = sizeof(*request_param);
1860
1861         /* Send the request and receive the reply */
1862         AF_XDP_LOG(DEBUG, "Sending multi-process IPC request for %s\n", name);
1863         ret = rte_mp_request_sync(&request, &replies, &timeout);
1864         if (ret < 0 || replies.nb_received != 1) {
1865                 AF_XDP_LOG(ERR, "Failed to request fds from primary: %d\n",
1866                            rte_errno);
1867                 return -1;
1868         }
1869         reply = replies.msgs;
1870         AF_XDP_LOG(DEBUG, "Received multi-process IPC reply for %s\n", name);
1871         if (dev->data->nb_rx_queues != reply->num_fds) {
1872                 AF_XDP_LOG(ERR, "Incorrect number of fds received: %d != %d\n",
1873                            reply->num_fds, dev->data->nb_rx_queues);
1874                 return -EINVAL;
1875         }
1876
1877         for (i = 0; i < reply->num_fds; i++)
1878                 process_private->rxq_xsk_fds[i] = reply->fds[i];
1879
1880         free(reply);
1881         return 0;
1882 }
1883
1884 /* Primary process sends rxq fds to secondary. */
1885 static int
1886 afxdp_mp_send_fds(const struct rte_mp_msg *request, const void *peer)
1887 {
1888         struct rte_eth_dev *dev;
1889         struct pmd_process_private *process_private;
1890         struct rte_mp_msg reply;
1891         const struct ipc_hdr *request_param =
1892                 (const struct ipc_hdr *)request->param;
1893         struct ipc_hdr *reply_param =
1894                 (struct ipc_hdr *)reply.param;
1895         const char *request_name = request_param->port_name;
1896         int i;
1897
1898         AF_XDP_LOG(DEBUG, "Received multi-process IPC request for %s\n",
1899                    request_name);
1900
1901         /* Find the requested port */
1902         dev = rte_eth_dev_get_by_name(request_name);
1903         if (!dev) {
1904                 AF_XDP_LOG(ERR, "Failed to get port id for %s\n", request_name);
1905                 return -1;
1906         }
1907         process_private = dev->process_private;
1908
1909         /* Populate the reply with the xsk fd for each queue */
1910         reply.num_fds = 0;
1911         if (dev->data->nb_rx_queues > RTE_MP_MAX_FD_NUM) {
1912                 AF_XDP_LOG(ERR, "Number of rx queues (%d) exceeds max number of fds (%d)\n",
1913                            dev->data->nb_rx_queues, RTE_MP_MAX_FD_NUM);
1914                 return -EINVAL;
1915         }
1916
1917         for (i = 0; i < dev->data->nb_rx_queues; i++)
1918                 reply.fds[reply.num_fds++] = process_private->rxq_xsk_fds[i];
1919
1920         /* Send the reply */
1921         strlcpy(reply.name, request->name, sizeof(reply.name));
1922         strlcpy(reply_param->port_name, request_name,
1923                 sizeof(reply_param->port_name));
1924         reply.len_param = sizeof(*reply_param);
1925         AF_XDP_LOG(DEBUG, "Sending multi-process IPC reply for %s\n",
1926                    reply_param->port_name);
1927         if (rte_mp_reply(&reply, peer) < 0) {
1928                 AF_XDP_LOG(ERR, "Failed to reply to multi-process IPC request\n");
1929                 return -1;
1930         }
1931         return 0;
1932 }
1933
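/* Probe entry point. A secondary process attaches to the existing port and
 * fetches the xsk fds from the primary over the multi-process channel; the
 * primary parses the devargs and creates the device.
 */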
1934 static int
1935 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1936 {
1937         struct rte_kvargs *kvlist;
1938         char if_name[IFNAMSIZ] = {'\0'};
1939         int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1940         int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1941         int shared_umem = 0;
1942         char prog_path[PATH_MAX] = {'\0'};
1943         int busy_budget = -1, ret;
1944         struct rte_eth_dev *eth_dev = NULL;
1945         const char *name = rte_vdev_device_name(dev);
1946
1947         AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n", name);
1948
1949         if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
1950                 eth_dev = rte_eth_dev_attach_secondary(name);
1951                 if (eth_dev == NULL) {
1952                         AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1953                         return -EINVAL;
1954                 }
1955                 eth_dev->dev_ops = &ops;
1956                 eth_dev->device = &dev->device;
1957                 eth_dev->rx_pkt_burst = rte_eth_pkt_burst_dummy;
1958                 eth_dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
1959                 eth_dev->process_private = (struct pmd_process_private *)
1960                         rte_zmalloc_socket(name,
1961                                            sizeof(struct pmd_process_private),
1962                                            RTE_CACHE_LINE_SIZE,
1963                                            eth_dev->device->numa_node);
1964                 if (eth_dev->process_private == NULL) {
1965                         AF_XDP_LOG(ERR,
1966                                 "Failed to alloc memory for process private\n");
1967                         return -ENOMEM;
1968                 }
1969
1970                 /* Obtain the xsk fds from the primary process. */
1971                 if (afxdp_mp_request_fds(name, eth_dev))
1972                         return -1;
1973
1974                 rte_eth_dev_probing_finish(eth_dev);
1975                 return 0;
1976         }
1977
1978         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1979         if (kvlist == NULL) {
1980                 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1981                 return -EINVAL;
1982         }
1983
1984         if (dev->device.numa_node == SOCKET_ID_ANY)
1985                 dev->device.numa_node = rte_socket_id();
1986
1987         if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1988                              &xsk_queue_cnt, &shared_umem, prog_path,
1989                              &busy_budget) < 0) {
1990                 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1991                 return -EINVAL;
1992         }
1993
1994         if (strlen(if_name) == 0) {
1995                 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1996                 return -EINVAL;
1997         }
1998
1999         busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
2000                                         busy_budget;
2001
2002         eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
2003                                         xsk_queue_cnt, shared_umem, prog_path,
2004                                         busy_budget);
2005         if (eth_dev == NULL) {
2006                 AF_XDP_LOG(ERR, "Failed to init internals\n");
2007                 return -1;
2008         }
2009
2010         /* Register IPC callback which shares xsk fds from primary to secondary */
2011         if (!afxdp_dev_count) {
2012                 ret = rte_mp_action_register(ETH_AF_XDP_MP_KEY, afxdp_mp_send_fds);
2013                 if (ret < 0 && rte_errno != ENOTSUP) {
2014                         AF_XDP_LOG(ERR, "%s: Failed to register multi-process IPC callback: %s\n",
2015                                    name, strerror(rte_errno));
2016                         return -1;
2017                 }
2018         }
2019         afxdp_dev_count++;
2020
2021         rte_eth_dev_probing_finish(eth_dev);
2022
2023         return 0;
2024 }
2025
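/* Remove entry point: close the port and, when the last AF_XDP device is
 * removed, unregister the multi-process IPC action.
 */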
2026 static int
2027 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
2028 {
2029         struct rte_eth_dev *eth_dev = NULL;
2030
2031         AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
2032                 rte_socket_id());
2033
2034         if (dev == NULL)
2035                 return -1;
2036
2037         /* find the ethdev entry */
2038         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
2039         if (eth_dev == NULL)
2040                 return 0;
2041
2042         eth_dev_close(eth_dev);
2043         if (afxdp_dev_count == 1)
2044                 rte_mp_action_unregister(ETH_AF_XDP_MP_KEY);
2045         afxdp_dev_count--;
2046         rte_eth_dev_release_port(eth_dev);
2047
2048         return 0;
2049 }
2050
2051 static struct rte_vdev_driver pmd_af_xdp_drv = {
2052         .probe = rte_pmd_af_xdp_probe,
2053         .remove = rte_pmd_af_xdp_remove,
2054 };
2055
2056 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
2057 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
2058                               "iface=<string> "
2059                               "start_queue=<int> "
2060                               "queue_count=<int> "
2061                               "shared_umem=<int> "
2062                               "xdp_prog=<string> "
2063                               "busy_budget=<int>");