dpdk.git: drivers/net/af_xdp/rte_eth_af_xdp.c (commit 3957227bf01cb592f8721fd95177539d35f418b6)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Intel Corporation.
3  */
4 #include <unistd.h>
5 #include <errno.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <poll.h>
9 #include <netinet/in.h>
10 #include <net/if.h>
11 #include <sys/socket.h>
12 #include <sys/ioctl.h>
13 #include <linux/if_ether.h>
14 #include <linux/if_xdp.h>
15 #include <linux/if_link.h>
16 #include <linux/ethtool.h>
17 #include <linux/sockios.h>
18 #include "af_xdp_deps.h"
19 #include <bpf/xsk.h>
20
21 #include <rte_ethdev.h>
22 #include <ethdev_driver.h>
23 #include <ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_bus_vdev.h>
26 #include <rte_string_fns.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_dev.h>
30 #include <rte_eal.h>
31 #include <rte_ether.h>
32 #include <rte_lcore.h>
33 #include <rte_log.h>
34 #include <rte_memory.h>
35 #include <rte_memzone.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_malloc.h>
39 #include <rte_ring.h>
40 #include <rte_spinlock.h>
41
42 #include "compat.h"
43
44
45 #ifndef SOL_XDP
46 #define SOL_XDP 283
47 #endif
48
49 #ifndef AF_XDP
50 #define AF_XDP 44
51 #endif
52
53 #ifndef PF_XDP
54 #define PF_XDP AF_XDP
55 #endif
56
57 RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
58
59 #define AF_XDP_LOG(level, fmt, args...)                 \
60         rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
61                 "%s(): " fmt, __func__, ##args)
62
63 #define ETH_AF_XDP_FRAME_SIZE           2048
64 #define ETH_AF_XDP_NUM_BUFFERS          4096
65 #define ETH_AF_XDP_DFLT_NUM_DESCS       XSK_RING_CONS__DEFAULT_NUM_DESCS
66 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
67 #define ETH_AF_XDP_DFLT_QUEUE_COUNT     1
68
69 #define ETH_AF_XDP_RX_BATCH_SIZE        32
70 #define ETH_AF_XDP_TX_BATCH_SIZE        32
71
72
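/*
 * Per-UMEM bookkeeping. In zero-copy mode (XDP_UMEM_UNALIGNED_CHUNK_FLAG) the
 * UMEM overlays the mempool memory: mb_pool/buffer are used, and refcnt plus
 * max_xsks track sharing between sockets. In copy mode the UMEM is a separate
 * memzone (mz) and free frames are tracked in buf_ring.
 */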
73 struct xsk_umem_info {
74         struct xsk_umem *umem;
75         struct rte_ring *buf_ring;
76         const struct rte_memzone *mz;
77         struct rte_mempool *mb_pool;
78         void *buffer;
79         uint8_t refcnt;
80         uint32_t max_xsks;
81 };
82
83 struct rx_stats {
84         uint64_t rx_pkts;
85         uint64_t rx_bytes;
86         uint64_t rx_dropped;
87 };
88
89 struct pkt_rx_queue {
90         struct xsk_ring_cons rx;
91         struct xsk_umem_info *umem;
92         struct xsk_socket *xsk;
93         struct rte_mempool *mb_pool;
94
95         struct rx_stats stats;
96
97         struct xsk_ring_prod fq;
98         struct xsk_ring_cons cq;
99
100         struct pkt_tx_queue *pair;
101         struct pollfd fds[1];
102         int xsk_queue_idx;
103 };
104
105 struct tx_stats {
106         uint64_t tx_pkts;
107         uint64_t tx_bytes;
108         uint64_t tx_dropped;
109 };
110
111 struct pkt_tx_queue {
112         struct xsk_ring_prod tx;
113         struct xsk_umem_info *umem;
114
115         struct tx_stats stats;
116
117         struct pkt_rx_queue *pair;
118         int xsk_queue_idx;
119 };
120
121 struct pmd_internals {
122         int if_index;
123         char if_name[IFNAMSIZ];
124         int start_queue_idx;
125         int queue_cnt;
126         int max_queue_cnt;
127         int combined_queue_cnt;
128         bool shared_umem;
129         char prog_path[PATH_MAX];
130         bool custom_prog_configured;
131
132         struct rte_ether_addr eth_addr;
133
134         struct pkt_rx_queue *rx_queues;
135         struct pkt_tx_queue *tx_queues;
136 };
137
138 #define ETH_AF_XDP_IFACE_ARG                    "iface"
139 #define ETH_AF_XDP_START_QUEUE_ARG              "start_queue"
140 #define ETH_AF_XDP_QUEUE_COUNT_ARG              "queue_count"
141 #define ETH_AF_XDP_SHARED_UMEM_ARG              "shared_umem"
142 #define ETH_AF_XDP_PROG_ARG                     "xdp_prog"
143
144 static const char * const valid_arguments[] = {
145         ETH_AF_XDP_IFACE_ARG,
146         ETH_AF_XDP_START_QUEUE_ARG,
147         ETH_AF_XDP_QUEUE_COUNT_ARG,
148         ETH_AF_XDP_SHARED_UMEM_ARG,
149         ETH_AF_XDP_PROG_ARG,
150         NULL
151 };
152
153 static const struct rte_eth_link pmd_link = {
154         .link_speed = ETH_SPEED_NUM_10G,
155         .link_duplex = ETH_LINK_FULL_DUPLEX,
156         .link_status = ETH_LINK_DOWN,
157         .link_autoneg = ETH_LINK_AUTONEG
158 };
159
160 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
161 struct internal_list {
162         TAILQ_ENTRY(internal_list) next;
163         struct rte_eth_dev *eth_dev;
164 };
165
166 TAILQ_HEAD(internal_list_head, internal_list);
167 static struct internal_list_head internal_list =
168         TAILQ_HEAD_INITIALIZER(internal_list);
169
170 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
171
172 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
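/*
 * Zero-copy fill: each fill queue entry is the offset of an mbuf's buffer
 * within the UMEM (the mempool memory), i.e. the mbuf address relative to the
 * UMEM base minus the mempool object header.
 */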
173 static inline int
174 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
175                       struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
176 {
177         uint32_t idx;
178         uint16_t i;
179
180         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
181                 for (i = 0; i < reserve_size; i++)
182                         rte_pktmbuf_free(bufs[i]);
183                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
184                 return -1;
185         }
186
187         for (i = 0; i < reserve_size; i++) {
188                 __u64 *fq_addr;
189                 uint64_t addr;
190
191                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
192                 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
193                                 umem->mb_pool->header_size;
194                 *fq_addr = addr;
195         }
196
197         xsk_ring_prod__submit(fq, reserve_size);
198
199         return 0;
200 }
201 #else
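/*
 * Copy-mode fill: free fixed-size UMEM frames are dequeued from the PMD's
 * buf_ring and their addresses are posted to the fill queue.
 */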
202 static inline int
203 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
204                       struct rte_mbuf **bufs __rte_unused,
205                       struct xsk_ring_prod *fq)
206 {
207         void *addrs[reserve_size];
208         uint32_t idx;
209         uint16_t i;
210
211         if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
212                     != reserve_size) {
213                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
214                 return -1;
215         }
216
217         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
218                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
219                 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
220                                 reserve_size, NULL);
221                 return -1;
222         }
223
224         for (i = 0; i < reserve_size; i++) {
225                 __u64 *fq_addr;
226
227                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
228                 *fq_addr = (uint64_t)addrs[i];
229         }
230
231         xsk_ring_prod__submit(fq, reserve_size);
232
233         return 0;
234 }
235 #endif
236
237 static inline int
238 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
239                    struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
240 {
241 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
242         return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
243 #else
244         return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
245 #endif
246 }
247
248 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
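/*
 * Zero-copy receive: RX descriptors point into the mempool memory, so the
 * mbuf is recovered directly from the UMEM address and only its metadata
 * (data_off, lengths) is fixed up. Fresh mbufs are allocated up front to
 * replenish the fill queue afterwards.
 */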
249 static uint16_t
250 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
251 {
252         struct pkt_rx_queue *rxq = queue;
253         struct xsk_ring_cons *rx = &rxq->rx;
254         struct xsk_ring_prod *fq = &rxq->fq;
255         struct xsk_umem_info *umem = rxq->umem;
256         uint32_t idx_rx = 0;
257         unsigned long rx_bytes = 0;
258         int i;
259         struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
260
261         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
262
263         if (nb_pkts == 0) {
264 #if defined(XDP_USE_NEED_WAKEUP)
265                 if (xsk_ring_prod__needs_wakeup(fq))
266                         (void)poll(rxq->fds, 1, 1000);
267 #endif
268
269                 return 0;
270         }
271
272         /* allocate bufs for fill queue replenishment after rx */
273         if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
274                 AF_XDP_LOG(DEBUG,
275                         "Failed to get enough buffers for fq.\n");
276                 /* Roll back cached_cons, which was advanced by
277                  * xsk_ring_cons__peek.
278                  */
279                 rx->cached_cons -= nb_pkts;
280                 return 0;
281         }
282
283         for (i = 0; i < nb_pkts; i++) {
284                 const struct xdp_desc *desc;
285                 uint64_t addr;
286                 uint32_t len;
287                 uint64_t offset;
288
289                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
290                 addr = desc->addr;
291                 len = desc->len;
292
293                 offset = xsk_umem__extract_offset(addr);
294                 addr = xsk_umem__extract_addr(addr);
295
296                 bufs[i] = (struct rte_mbuf *)
297                                 xsk_umem__get_data(umem->buffer, addr +
298                                         umem->mb_pool->header_size);
299                 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
300                         rte_pktmbuf_priv_size(umem->mb_pool) -
301                         umem->mb_pool->header_size;
302
303                 rte_pktmbuf_pkt_len(bufs[i]) = len;
304                 rte_pktmbuf_data_len(bufs[i]) = len;
305                 rx_bytes += len;
306         }
307
308         xsk_ring_cons__release(rx, nb_pkts);
309         (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
310
311         /* statistics */
312         rxq->stats.rx_pkts += nb_pkts;
313         rxq->stats.rx_bytes += rx_bytes;
314
315         return nb_pkts;
316 }
317 #else
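/*
 * Copy-mode receive: packet data is copied from the UMEM frame into a newly
 * allocated mbuf and the frame address is returned to buf_ring.
 */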
318 static uint16_t
319 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
320 {
321         struct pkt_rx_queue *rxq = queue;
322         struct xsk_ring_cons *rx = &rxq->rx;
323         struct xsk_umem_info *umem = rxq->umem;
324         struct xsk_ring_prod *fq = &rxq->fq;
325         uint32_t idx_rx = 0;
326         unsigned long rx_bytes = 0;
327         int i;
328         uint32_t free_thresh = fq->size >> 1;
329         struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
330
331         if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
332                 (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE,
333                                          NULL, fq);
334
335         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
336         if (nb_pkts == 0) {
337 #if defined(XDP_USE_NEED_WAKEUP)
338                 if (xsk_ring_prod__needs_wakeup(fq))
339                         (void)poll(rxq->fds, 1, 1000);
340 #endif
341                 return 0;
342         }
343
344         if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
345                 /* Roll back cached_cons, which was advanced by
346                  * xsk_ring_cons__peek.
347                  */
348                 rx->cached_cons -= nb_pkts;
349                 return 0;
350         }
351
352         for (i = 0; i < nb_pkts; i++) {
353                 const struct xdp_desc *desc;
354                 uint64_t addr;
355                 uint32_t len;
356                 void *pkt;
357
358                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
359                 addr = desc->addr;
360                 len = desc->len;
361                 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
362
363                 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
364                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
365                 rte_pktmbuf_pkt_len(mbufs[i]) = len;
366                 rte_pktmbuf_data_len(mbufs[i]) = len;
367                 rx_bytes += len;
368                 bufs[i] = mbufs[i];
369         }
370
371         xsk_ring_cons__release(rx, nb_pkts);
372
373         /* statistics */
374         rxq->stats.rx_pkts += nb_pkts;
375         rxq->stats.rx_bytes += rx_bytes;
376
377         return nb_pkts;
378 }
379 #endif
380
381 static uint16_t
382 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
383 {
384         nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
385
386 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
387         return af_xdp_rx_zc(queue, bufs, nb_pkts);
388 #else
389         return af_xdp_rx_cp(queue, bufs, nb_pkts);
390 #endif
391 }
392
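/*
 * Drain up to 'size' entries from the completion queue, returning the
 * transmitted buffers to the mempool (zero-copy) or to buf_ring (copy mode).
 */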
393 static void
394 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
395 {
396         size_t i, n;
397         uint32_t idx_cq = 0;
398
399         n = xsk_ring_cons__peek(cq, size, &idx_cq);
400
401         for (i = 0; i < n; i++) {
402                 uint64_t addr;
403                 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
404 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
405                 addr = xsk_umem__extract_addr(addr);
406                 rte_pktmbuf_free((struct rte_mbuf *)
407                                         xsk_umem__get_data(umem->buffer,
408                                         addr + umem->mb_pool->header_size));
409 #else
410                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
411 #endif
412         }
413
414         xsk_ring_cons__release(cq, n);
415 }
416
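/*
 * Kick the kernel TX path via send(). Transient errors (EBUSY/EAGAIN/EINTR)
 * are retried; on EAGAIN the completion queue is drained first to free up
 * descriptors.
 */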
417 static void
418 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
419 {
420         struct xsk_umem_info *umem = txq->umem;
421
422         pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
423
424 #if defined(XDP_USE_NEED_WAKEUP)
425         if (xsk_ring_prod__needs_wakeup(&txq->tx))
426 #endif
427                 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
428                             0, MSG_DONTWAIT) < 0) {
429                         /* something unexpected */
430                         if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
431                                 break;
432
433                         /* pull from completion queue to leave more space */
434                         if (errno == EAGAIN)
435                                 pull_umem_cq(umem,
436                                              XSK_RING_CONS__DEFAULT_NUM_DESCS,
437                                              cq);
438                 }
439 }
440
441 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
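/*
 * Zero-copy transmit: mbufs that already live in the UMEM mempool are
 * attached to TX descriptors in place; mbufs from other pools are first
 * copied into a locally allocated UMEM mbuf.
 */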
442 static uint16_t
443 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
444 {
445         struct pkt_tx_queue *txq = queue;
446         struct xsk_umem_info *umem = txq->umem;
447         struct rte_mbuf *mbuf;
448         unsigned long tx_bytes = 0;
449         int i;
450         uint32_t idx_tx;
451         uint16_t count = 0;
452         struct xdp_desc *desc;
453         uint64_t addr, offset;
454         struct xsk_ring_cons *cq = &txq->pair->cq;
455         uint32_t free_thresh = cq->size >> 1;
456
457         if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
458                 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
459
460         for (i = 0; i < nb_pkts; i++) {
461                 mbuf = bufs[i];
462
463                 if (mbuf->pool == umem->mb_pool) {
464                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
465                                 kick_tx(txq, cq);
466                                 if (!xsk_ring_prod__reserve(&txq->tx, 1,
467                                                             &idx_tx))
468                                         goto out;
469                         }
470                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
471                         desc->len = mbuf->pkt_len;
472                         addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
473                                         umem->mb_pool->header_size;
474                         offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
475                                         (uint64_t)mbuf +
476                                         umem->mb_pool->header_size;
477                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
478                         desc->addr = addr | offset;
479                         count++;
480                 } else {
481                         struct rte_mbuf *local_mbuf =
482                                         rte_pktmbuf_alloc(umem->mb_pool);
483                         void *pkt;
484
485                         if (local_mbuf == NULL)
486                                 goto out;
487
488                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
489                                 rte_pktmbuf_free(local_mbuf);
490                                 kick_tx(txq, cq);
491                                 goto out;
492                         }
493
494                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
495                         desc->len = mbuf->pkt_len;
496
497                         addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
498                                         umem->mb_pool->header_size;
499                         offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
500                                         (uint64_t)local_mbuf +
501                                         umem->mb_pool->header_size;
502                         pkt = xsk_umem__get_data(umem->buffer, addr + offset);
503                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
504                         desc->addr = addr | offset;
505                         rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
506                                         desc->len);
507                         rte_pktmbuf_free(mbuf);
508                         count++;
509                 }
510
511                 tx_bytes += desc->len; /* mbuf may already be freed */
512         }
513
514         kick_tx(txq, cq);
515
516 out:
517         xsk_ring_prod__submit(&txq->tx, count);
518
519         txq->stats.tx_pkts += count;
520         txq->stats.tx_bytes += tx_bytes;
521         txq->stats.tx_dropped += nb_pkts - count;
522
523         return count;
524 }
525 #else
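/*
 * Copy-mode transmit: dequeue free UMEM frames from buf_ring, copy the packet
 * data into them and submit the TX descriptors.
 */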
526 static uint16_t
527 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
528 {
529         struct pkt_tx_queue *txq = queue;
530         struct xsk_umem_info *umem = txq->umem;
531         struct rte_mbuf *mbuf;
532         void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
533         unsigned long tx_bytes = 0;
534         int i;
535         uint32_t idx_tx;
536         struct xsk_ring_cons *cq = &txq->pair->cq;
537
538         nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
539
540         pull_umem_cq(umem, nb_pkts, cq);
541
542         nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
543                                         nb_pkts, NULL);
544         if (nb_pkts == 0)
545                 return 0;
546
547         if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
548                 kick_tx(txq, cq);
549                 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
550                 return 0;
551         }
552
553         for (i = 0; i < nb_pkts; i++) {
554                 struct xdp_desc *desc;
555                 void *pkt;
556
557                 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
558                 mbuf = bufs[i];
559                 desc->len = mbuf->pkt_len;
560
561                 desc->addr = (uint64_t)addrs[i];
562                 pkt = xsk_umem__get_data(umem->mz->addr,
563                                          desc->addr);
564                 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
565                 tx_bytes += mbuf->pkt_len;
566                 rte_pktmbuf_free(mbuf);
567         }
568
569         xsk_ring_prod__submit(&txq->tx, nb_pkts);
570
571         kick_tx(txq, cq);
572
573         txq->stats.tx_pkts += nb_pkts;
574         txq->stats.tx_bytes += tx_bytes;
575
576         return nb_pkts;
577 }
578 #endif
579
580 static uint16_t
581 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
582 {
583 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
584         return af_xdp_tx_zc(queue, bufs, nb_pkts);
585 #else
586         return af_xdp_tx_cp(queue, bufs, nb_pkts);
587 #endif
588 }
589
590 static int
591 eth_dev_start(struct rte_eth_dev *dev)
592 {
593         dev->data->dev_link.link_status = ETH_LINK_UP;
594
595         return 0;
596 }
597
598 /* This function gets called when the current port gets stopped. */
599 static int
600 eth_dev_stop(struct rte_eth_dev *dev)
601 {
602         dev->data->dev_link.link_status = ETH_LINK_DOWN;
603         return 0;
604 }
605
606 /* Find ethdev in list */
607 static inline struct internal_list *
608 find_internal_resource(struct pmd_internals *port_int)
609 {
610         int found = 0;
611         struct internal_list *list = NULL;
612
613         if (port_int == NULL)
614                 return NULL;
615
616         pthread_mutex_lock(&internal_list_lock);
617
618         TAILQ_FOREACH(list, &internal_list, next) {
619                 struct pmd_internals *list_int =
620                                 list->eth_dev->data->dev_private;
621                 if (list_int == port_int) {
622                         found = 1;
623                         break;
624                 }
625         }
626
627         pthread_mutex_unlock(&internal_list_lock);
628
629         if (!found)
630                 return NULL;
631
632         return list;
633 }
634
635 /* Check if the netdev,qid context already exists */
636 static inline bool
637 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
638                 struct pkt_rx_queue *list_rxq, const char *list_ifname)
639 {
640         bool exists = false;
641
642         if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
643                         !strncmp(ifname, list_ifname, IFNAMSIZ)) {
644                 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
645                                         ifname, rxq->xsk_queue_idx);
646                 exists = true;
647         }
648
649         return exists;
650 }
651
652 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
653 static inline int
654 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
655                         struct xsk_umem_info **umem)
656 {
657         struct internal_list *list;
658         struct pmd_internals *internals;
659         int i = 0, ret = 0;
660         struct rte_mempool *mb_pool = rxq->mb_pool;
661
662         if (mb_pool == NULL)
663                 return ret;
664
665         pthread_mutex_lock(&internal_list_lock);
666
667         TAILQ_FOREACH(list, &internal_list, next) {
668                 internals = list->eth_dev->data->dev_private;
669                 for (i = 0; i < internals->queue_cnt; i++) {
670                         struct pkt_rx_queue *list_rxq =
671                                                 &internals->rx_queues[i];
672                         if (rxq == list_rxq)
673                                 continue;
674                         if (mb_pool == internals->rx_queues[i].mb_pool) {
675                                 if (ctx_exists(rxq, ifname, list_rxq,
676                                                 internals->if_name)) {
677                                         ret = -1;
678                                         goto out;
679                                 }
680                                 if (__atomic_load_n(
681                                         &internals->rx_queues[i].umem->refcnt,
682                                                         __ATOMIC_ACQUIRE)) {
683                                         *umem = internals->rx_queues[i].umem;
684                                         goto out;
685                                 }
686                         }
687                 }
688         }
689
690 out:
691         pthread_mutex_unlock(&internal_list_lock);
692
693         return ret;
694 }
695
696 static int
697 eth_dev_configure(struct rte_eth_dev *dev)
698 {
699         struct pmd_internals *internal = dev->data->dev_private;
700
701         /* rx/tx must be paired */
702         if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
703                 return -EINVAL;
704
705         if (internal->shared_umem) {
706                 struct internal_list *list = NULL;
707                 const char *name = dev->device->name;
708
709                 /* Ensure PMD is not already inserted into the list */
710                 list = find_internal_resource(internal);
711                 if (list)
712                         return 0;
713
714                 list = rte_zmalloc_socket(name, sizeof(*list), 0,
715                                         dev->device->numa_node);
716                 if (list == NULL)
717                         return -1;
718
719                 list->eth_dev = dev;
720                 pthread_mutex_lock(&internal_list_lock);
721                 TAILQ_INSERT_TAIL(&internal_list, list, next);
722                 pthread_mutex_unlock(&internal_list_lock);
723         }
724
725         return 0;
726 }
727
728 static int
729 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
730 {
731         struct pmd_internals *internals = dev->data->dev_private;
732
733         dev_info->if_index = internals->if_index;
734         dev_info->max_mac_addrs = 1;
735         dev_info->max_rx_pktlen = ETH_FRAME_LEN;
736         dev_info->max_rx_queues = internals->queue_cnt;
737         dev_info->max_tx_queues = internals->queue_cnt;
738
739         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
740 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
741         dev_info->max_mtu = getpagesize() -
742                                 sizeof(struct rte_mempool_objhdr) -
743                                 sizeof(struct rte_mbuf) -
744                                 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
745 #else
746         dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
747 #endif
748
749         dev_info->default_rxportconf.nb_queues = 1;
750         dev_info->default_txportconf.nb_queues = 1;
751         dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
752         dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
753
754         return 0;
755 }
756
757 static int
758 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
759 {
760         struct pmd_internals *internals = dev->data->dev_private;
761         struct xdp_statistics xdp_stats;
762         struct pkt_rx_queue *rxq;
763         struct pkt_tx_queue *txq;
764         socklen_t optlen;
765         int i, ret;
766
767         for (i = 0; i < dev->data->nb_rx_queues; i++) {
768                 optlen = sizeof(struct xdp_statistics);
769                 rxq = &internals->rx_queues[i];
770                 txq = rxq->pair;
771                 stats->q_ipackets[i] = rxq->stats.rx_pkts;
772                 stats->q_ibytes[i] = rxq->stats.rx_bytes;
773
774                 stats->q_opackets[i] = txq->stats.tx_pkts;
775                 stats->q_obytes[i] = txq->stats.tx_bytes;
776
777                 stats->ipackets += stats->q_ipackets[i];
778                 stats->ibytes += stats->q_ibytes[i];
779                 stats->imissed += rxq->stats.rx_dropped;
780                 stats->oerrors += txq->stats.tx_dropped;
781                 ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
782                                 XDP_STATISTICS, &xdp_stats, &optlen);
783                 if (ret != 0) {
784                         AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
785                         return -1;
786                 }
787                 stats->imissed += xdp_stats.rx_dropped;
788
789                 stats->opackets += stats->q_opackets[i];
790                 stats->obytes += stats->q_obytes[i];
791         }
792
793         return 0;
794 }
795
796 static int
797 eth_stats_reset(struct rte_eth_dev *dev)
798 {
799         struct pmd_internals *internals = dev->data->dev_private;
800         int i;
801
802         for (i = 0; i < internals->queue_cnt; i++) {
803                 memset(&internals->rx_queues[i].stats, 0,
804                                         sizeof(struct rx_stats));
805                 memset(&internals->tx_queues[i].stats, 0,
806                                         sizeof(struct tx_stats));
807         }
808
809         return 0;
810 }
811
812 static void
813 remove_xdp_program(struct pmd_internals *internals)
814 {
815         uint32_t curr_prog_id = 0;
816
817         if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
818                                 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
819                 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
820                 return;
821         }
822         bpf_set_link_xdp_fd(internals->if_index, -1,
823                         XDP_FLAGS_UPDATE_IF_NOEXIST);
824 }
825
826 static void
827 xdp_umem_destroy(struct xsk_umem_info *umem)
828 {
829 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
830         umem->mb_pool = NULL;
831 #else
832         rte_memzone_free(umem->mz);
833         umem->mz = NULL;
834
835         rte_ring_free(umem->buf_ring);
836         umem->buf_ring = NULL;
837 #endif
838
839         rte_free(umem);
840 }
841
842 static int
843 eth_dev_close(struct rte_eth_dev *dev)
844 {
845         struct pmd_internals *internals = dev->data->dev_private;
846         struct pkt_rx_queue *rxq;
847         int i;
848
849         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
850                 return 0;
851
852         AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
853                 rte_socket_id());
854
855         for (i = 0; i < internals->queue_cnt; i++) {
856                 rxq = &internals->rx_queues[i];
857                 if (rxq->umem == NULL)
858                         break;
859                 xsk_socket__delete(rxq->xsk);
860
861                 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
862                                 == 0) {
863                         (void)xsk_umem__delete(rxq->umem->umem);
864                         xdp_umem_destroy(rxq->umem);
865                 }
866
867                 /* free pkt_tx_queue */
868                 rte_free(rxq->pair);
869                 rte_free(rxq);
870         }
871
872         /*
873          * MAC is not allocated dynamically; setting it to NULL prevents
874          * rte_eth_dev_release_port() from trying to free it.
875          */
876         dev->data->mac_addrs = NULL;
877
878         remove_xdp_program(internals);
879
880         if (internals->shared_umem) {
881                 struct internal_list *list;
882
883                 /* Remove ethdev from list used to track and share UMEMs */
884                 list = find_internal_resource(internals);
885                 if (list) {
886                         pthread_mutex_lock(&internal_list_lock);
887                         TAILQ_REMOVE(&internal_list, list, next);
888                         pthread_mutex_unlock(&internal_list_lock);
889                         rte_free(list);
890                 }
891         }
892
893         return 0;
894 }
895
896 static void
897 eth_queue_release(void *q __rte_unused)
898 {
899 }
900
901 static int
902 eth_link_update(struct rte_eth_dev *dev __rte_unused,
903                 int wait_to_complete __rte_unused)
904 {
905         return 0;
906 }
907
908 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
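/*
 * The UMEM base address must be page aligned: align the mempool's first
 * memory chunk down to a page boundary and report the adjustment in *align.
 */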
909 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
910 {
911         struct rte_mempool_memhdr *memhdr;
912         uintptr_t memhdr_addr, aligned_addr;
913
914         memhdr = STAILQ_FIRST(&mp->mem_list);
915         memhdr_addr = (uintptr_t)memhdr->addr;
916         aligned_addr = memhdr_addr & ~(getpagesize() - 1);
917         *align = memhdr_addr - aligned_addr;
918
919         return aligned_addr;
920 }
921
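/*
 * Zero-copy UMEM setup: register the mempool memory itself as the UMEM, using
 * the mempool object size as the frame size. With shared_umem the UMEM may be
 * reused by other queues/devices that use the same mempool.
 */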
922 static struct
923 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
924                                   struct pkt_rx_queue *rxq)
925 {
926         struct xsk_umem_info *umem = NULL;
927         int ret;
928         struct xsk_umem_config usr_config = {
929                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
930                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
931                 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
932         void *base_addr = NULL;
933         struct rte_mempool *mb_pool = rxq->mb_pool;
934         uint64_t umem_size, align = 0;
935
936         if (internals->shared_umem) {
937                 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
938                         return NULL;
939
940                 if (umem != NULL &&
941                         __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
942                                         umem->max_xsks) {
943                         AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
944                                         internals->if_name, rxq->xsk_queue_idx);
945                         __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
946                 }
947         }
948
949         if (umem == NULL) {
950                 usr_config.frame_size =
951                         rte_mempool_calc_obj_size(mb_pool->elt_size,
952                                                   mb_pool->flags, NULL);
953                 usr_config.frame_headroom = mb_pool->header_size +
954                                                 sizeof(struct rte_mbuf) +
955                                                 rte_pktmbuf_priv_size(mb_pool) +
956                                                 RTE_PKTMBUF_HEADROOM;
957
958                 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
959                                           rte_socket_id());
960                 if (umem == NULL) {
961                         AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
962                         return NULL;
963                 }
964
965                 umem->mb_pool = mb_pool;
966                 base_addr = (void *)get_base_addr(mb_pool, &align);
967                 umem_size = (uint64_t)mb_pool->populated_size *
968                                 (uint64_t)usr_config.frame_size +
969                                 align;
970
971                 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
972                                 &rxq->fq, &rxq->cq, &usr_config);
973                 if (ret) {
974                         AF_XDP_LOG(ERR, "Failed to create umem\n");
975                         goto err;
976                 }
977                 umem->buffer = base_addr;
978
979                 if (internals->shared_umem) {
980                         umem->max_xsks = mb_pool->populated_size /
981                                                 ETH_AF_XDP_NUM_BUFFERS;
982                         AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
983                                                 mb_pool->name, umem->max_xsks);
984                 }
985
986                 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
987         }
988
989 #else
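/*
 * Copy-mode UMEM setup: reserve a dedicated memzone of fixed-size frames and
 * seed buf_ring with every frame address.
 */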
990 static struct
991 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
992                                   struct pkt_rx_queue *rxq)
993 {
994         struct xsk_umem_info *umem;
995         const struct rte_memzone *mz;
996         struct xsk_umem_config usr_config = {
997                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
998                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
999                 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1000                 .frame_headroom = 0 };
1001         char ring_name[RTE_RING_NAMESIZE];
1002         char mz_name[RTE_MEMZONE_NAMESIZE];
1003         int ret;
1004         uint64_t i;
1005
1006         umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1007         if (umem == NULL) {
1008                 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1009                 return NULL;
1010         }
1011
1012         snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1013                        internals->if_name, rxq->xsk_queue_idx);
1014         umem->buf_ring = rte_ring_create(ring_name,
1015                                          ETH_AF_XDP_NUM_BUFFERS,
1016                                          rte_socket_id(),
1017                                          0x0);
1018         if (umem->buf_ring == NULL) {
1019                 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1020                 goto err;
1021         }
1022
1023         for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1024                 rte_ring_enqueue(umem->buf_ring,
1025                                  (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1026
1027         snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1028                        internals->if_name, rxq->xsk_queue_idx);
1029         mz = rte_memzone_reserve_aligned(mz_name,
1030                         ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1031                         rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1032                         getpagesize());
1033         if (mz == NULL) {
1034                 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1035                 goto err;
1036         }
1037
1038         ret = xsk_umem__create(&umem->umem, mz->addr,
1039                                ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1040                                &rxq->fq, &rxq->cq,
1041                                &usr_config);
1042
1043         if (ret) {
1044                 AF_XDP_LOG(ERR, "Failed to create umem\n");
1045                 goto err;
1046         }
1047         umem->mz = mz;
1048
1049 #endif
1050         return umem;
1051
1052 err:
1053         xdp_umem_destroy(umem);
1054         return NULL;
1055 }
1056
1057 static int
1058 load_custom_xdp_prog(const char *prog_path, int if_index)
1059 {
1060         int ret, prog_fd = -1;
1061         struct bpf_object *obj;
1062         struct bpf_map *map;
1063
1064         ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
1065         if (ret) {
1066                 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1067                 return ret;
1068         }
1069
1070         /*
1071          * The loaded program must provision for a map of xsks, such that some
1072          * traffic can be redirected to userspace. When the xsk is created,
1073          * libbpf inserts it into the map.
1074          */
1075         map = bpf_object__find_map_by_name(obj, "xsks_map");
1076         if (!map) {
1077                 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1078                 return -1;
1079         }
1080
1081         /* Link the program with the given network device */
1082         ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1083                                         XDP_FLAGS_UPDATE_IF_NOEXIST);
1084         if (ret) {
1085                 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1086                                 prog_fd);
1087                 return -1;
1088         }
1089
1090         AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1091                                 prog_path, prog_fd);
1092
1093         return 0;
1094 }
1095
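/*
 * Create the UMEM and AF_XDP socket for one rx/tx queue pair, optionally
 * loading a custom XDP program first, and pre-fill the fill queue.
 */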
1096 static int
1097 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1098               int ring_size)
1099 {
1100         struct xsk_socket_config cfg;
1101         struct pkt_tx_queue *txq = rxq->pair;
1102         int ret = 0;
1103         int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1104         struct rte_mbuf *fq_bufs[reserve_size];
1105
1106         rxq->umem = xdp_umem_configure(internals, rxq);
1107         if (rxq->umem == NULL)
1108                 return -ENOMEM;
1109         txq->umem = rxq->umem;
1110
1111         cfg.rx_size = ring_size;
1112         cfg.tx_size = ring_size;
1113         cfg.libbpf_flags = 0;
1114         cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1115         cfg.bind_flags = 0;
1116
1117 #if defined(XDP_USE_NEED_WAKEUP)
1118         cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1119 #endif
1120
1121         if (strnlen(internals->prog_path, PATH_MAX) &&
1122                                 !internals->custom_prog_configured) {
1123                 ret = load_custom_xdp_prog(internals->prog_path,
1124                                            internals->if_index);
1125                 if (ret) {
1126                         AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1127                                         internals->prog_path);
1128                         goto err;
1129                 }
1130                 internals->custom_prog_configured = 1;
1131         }
1132
1133         if (internals->shared_umem)
1134                 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1135                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1136                                 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1137         else
1138                 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1139                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1140                                 &txq->tx, &cfg);
1141
1142         if (ret) {
1143                 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1144                 goto err;
1145         }
1146
1147 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1148         ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1149         if (ret) {
1150                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1151                 goto err;
1152         }
1153 #endif
1154         ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1155         if (ret) {
1156                 xsk_socket__delete(rxq->xsk);
1157                 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1158                 goto err;
1159         }
1160
1161         return 0;
1162
1163 err:
1164         if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1165                 xdp_umem_destroy(rxq->umem);
1166
1167         return ret;
1168 }
1169
1170 static int
1171 eth_rx_queue_setup(struct rte_eth_dev *dev,
1172                    uint16_t rx_queue_id,
1173                    uint16_t nb_rx_desc,
1174                    unsigned int socket_id __rte_unused,
1175                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1176                    struct rte_mempool *mb_pool)
1177 {
1178         struct pmd_internals *internals = dev->data->dev_private;
1179         struct pkt_rx_queue *rxq;
1180         int ret;
1181
1182         rxq = &internals->rx_queues[rx_queue_id];
1183
1184         AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1185                    rx_queue_id, rxq->xsk_queue_idx);
1186
1187 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1188         uint32_t buf_size, data_size;
1189
1190         /* Now get the space available for data in the mbuf */
1191         buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1192                 RTE_PKTMBUF_HEADROOM;
1193         data_size = ETH_AF_XDP_FRAME_SIZE;
1194
1195         if (data_size > buf_size) {
1196                 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1197                         dev->device->name, data_size, buf_size);
1198                 ret = -ENOMEM;
1199                 goto err;
1200         }
1201 #endif
1202
1203         rxq->mb_pool = mb_pool;
1204
1205         if (xsk_configure(internals, rxq, nb_rx_desc)) {
1206                 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1207                 ret = -EINVAL;
1208                 goto err;
1209         }
1210
1211         rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1212         rxq->fds[0].events = POLLIN;
1213
1214         dev->data->rx_queues[rx_queue_id] = rxq;
1215         return 0;
1216
1217 err:
1218         return ret;
1219 }
1220
1221 static int
1222 eth_tx_queue_setup(struct rte_eth_dev *dev,
1223                    uint16_t tx_queue_id,
1224                    uint16_t nb_tx_desc __rte_unused,
1225                    unsigned int socket_id __rte_unused,
1226                    const struct rte_eth_txconf *tx_conf __rte_unused)
1227 {
1228         struct pmd_internals *internals = dev->data->dev_private;
1229         struct pkt_tx_queue *txq;
1230
1231         txq = &internals->tx_queues[tx_queue_id];
1232
1233         dev->data->tx_queues[tx_queue_id] = txq;
1234         return 0;
1235 }
1236
1237 static int
1238 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1239 {
1240         struct pmd_internals *internals = dev->data->dev_private;
1241         struct ifreq ifr = { .ifr_mtu = mtu };
1242         int ret;
1243         int s;
1244
1245         s = socket(PF_INET, SOCK_DGRAM, 0);
1246         if (s < 0)
1247                 return -EINVAL;
1248
1249         strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1250         ret = ioctl(s, SIOCSIFMTU, &ifr);
1251         close(s);
1252
1253         return (ret < 0) ? -errno : 0;
1254 }
1255
1256 static int
1257 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1258 {
1259         struct ifreq ifr;
1260         int ret = 0;
1261         int s;
1262
1263         s = socket(PF_INET, SOCK_DGRAM, 0);
1264         if (s < 0)
1265                 return -errno;
1266
1267         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1268         if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1269                 ret = -errno;
1270                 goto out;
1271         }
1272         ifr.ifr_flags &= mask;
1273         ifr.ifr_flags |= flags;
1274         if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1275                 ret = -errno;
1276                 goto out;
1277         }
1278 out:
1279         close(s);
1280         return ret;
1281 }
1282
1283 static int
1284 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1285 {
1286         struct pmd_internals *internals = dev->data->dev_private;
1287
1288         return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1289 }
1290
1291 static int
1292 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1293 {
1294         struct pmd_internals *internals = dev->data->dev_private;
1295
1296         return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1297 }
1298
1299 static const struct eth_dev_ops ops = {
1300         .dev_start = eth_dev_start,
1301         .dev_stop = eth_dev_stop,
1302         .dev_close = eth_dev_close,
1303         .dev_configure = eth_dev_configure,
1304         .dev_infos_get = eth_dev_info,
1305         .mtu_set = eth_dev_mtu_set,
1306         .promiscuous_enable = eth_dev_promiscuous_enable,
1307         .promiscuous_disable = eth_dev_promiscuous_disable,
1308         .rx_queue_setup = eth_rx_queue_setup,
1309         .tx_queue_setup = eth_tx_queue_setup,
1310         .rx_queue_release = eth_queue_release,
1311         .tx_queue_release = eth_queue_release,
1312         .link_update = eth_link_update,
1313         .stats_get = eth_stats_get,
1314         .stats_reset = eth_stats_reset,
1315 };
1316
1317 /** parse an integer argument */
1318 static int
1319 parse_integer_arg(const char *key __rte_unused,
1320                   const char *value, void *extra_args)
1321 {
1322         int *i = (int *)extra_args;
1323         char *end;
1324
1325         *i = strtol(value, &end, 10);
1326         if (*i < 0) {
1327                 AF_XDP_LOG(ERR, "Argument has to be non-negative.\n");
1328                 return -EINVAL;
1329         }
1330
1331         return 0;
1332 }
1333
1334 /** parse name argument */
1335 static int
1336 parse_name_arg(const char *key __rte_unused,
1337                const char *value, void *extra_args)
1338 {
1339         char *name = extra_args;
1340
1341         if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1342                 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1343                            value, IFNAMSIZ);
1344                 return -EINVAL;
1345         }
1346
1347         strlcpy(name, value, IFNAMSIZ);
1348
1349         return 0;
1350 }
1351
1352 /** parse xdp prog argument */
1353 static int
1354 parse_prog_arg(const char *key __rte_unused,
1355                const char *value, void *extra_args)
1356 {
1357         char *path = extra_args;
1358
1359         if (strnlen(value, PATH_MAX) == PATH_MAX) {
1360                 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1361                            value, PATH_MAX);
1362                 return -EINVAL;
1363         }
1364
1365         if (access(value, F_OK) != 0) {
1366                 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1367                            value, strerror(errno));
1368                 return -EINVAL;
1369         }
1370
1371         strlcpy(path, value, PATH_MAX);
1372
1373         return 0;
1374 }
1375
1376 static int
1377 xdp_get_channels_info(const char *if_name, int *max_queues,
1378                                 int *combined_queues)
1379 {
1380         struct ethtool_channels channels;
1381         struct ifreq ifr;
1382         int fd, ret;
1383
1384         fd = socket(AF_INET, SOCK_DGRAM, 0);
1385         if (fd < 0)
1386                 return -1;
1387
1388         channels.cmd = ETHTOOL_GCHANNELS;
1389         ifr.ifr_data = (void *)&channels;
1390         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1391         ret = ioctl(fd, SIOCETHTOOL, &ifr);
1392         if (ret) {
1393                 if (errno == EOPNOTSUPP) {
1394                         ret = 0;
1395                 } else {
1396                         ret = -errno;
1397                         goto out;
1398                 }
1399         }
1400
1401         if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1402                 /* If the device says it has no channels, then all traffic
1403                  * is sent to a single stream, so max queues = 1.
1404                  */
1405                 *max_queues = 1;
1406                 *combined_queues = 1;
1407         } else {
1408                 *max_queues = channels.max_combined;
1409                 *combined_queues = channels.combined_count;
1410         }
1411
1412  out:
1413         close(fd);
1414         return ret;
1415 }
1416
1417 static int
1418 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1419                         int *queue_cnt, int *shared_umem, char *prog_path)
1420 {
1421         int ret;
1422
1423         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1424                                  &parse_name_arg, if_name);
1425         if (ret < 0)
1426                 goto free_kvlist;
1427
1428         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1429                                  &parse_integer_arg, start_queue);
1430         if (ret < 0)
1431                 goto free_kvlist;
1432
1433         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1434                                  &parse_integer_arg, queue_cnt);
1435         if (ret < 0 || *queue_cnt <= 0) {
1436                 ret = -EINVAL;
1437                 goto free_kvlist;
1438         }
1439
1440         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1441                                 &parse_integer_arg, shared_umem);
1442         if (ret < 0)
1443                 goto free_kvlist;
1444
1445         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1446                                  &parse_prog_arg, prog_path);
1447         if (ret < 0)
1448                 goto free_kvlist;
1449
1450 free_kvlist:
1451         rte_kvargs_free(kvlist);
1452         return ret;
1453 }
1454
1455 static int
1456 get_iface_info(const char *if_name,
1457                struct rte_ether_addr *eth_addr,
1458                int *if_index)
1459 {
1460         struct ifreq ifr;
1461         int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1462
1463         if (sock < 0)
1464                 return -1;
1465
1466         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1467         if (ioctl(sock, SIOCGIFINDEX, &ifr))
1468                 goto error;
1469
1470         *if_index = ifr.ifr_ifindex;
1471
1472         if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1473                 goto error;
1474
1475         rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1476
1477         close(sock);
1478         return 0;
1479
1480 error:
1481         close(sock);
1482         return -1;
1483 }
1484
1485 static struct rte_eth_dev *
1486 init_internals(struct rte_vdev_device *dev, const char *if_name,
1487                 int start_queue_idx, int queue_cnt, int shared_umem,
1488                 const char *prog_path)
1489 {
1490         const char *name = rte_vdev_device_name(dev);
1491         const unsigned int numa_node = dev->device.numa_node;
1492         struct pmd_internals *internals;
1493         struct rte_eth_dev *eth_dev;
1494         int ret;
1495         int i;
1496
1497         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1498         if (internals == NULL)
1499                 return NULL;
1500
1501         internals->start_queue_idx = start_queue_idx;
1502         internals->queue_cnt = queue_cnt;
1503         strlcpy(internals->if_name, if_name, IFNAMSIZ);
1504         strlcpy(internals->prog_path, prog_path, PATH_MAX);
1505         internals->custom_prog_configured = 0;
1506
1507 #ifndef ETH_AF_XDP_SHARED_UMEM
1508         if (shared_umem) {
1509                 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1510                                 "Check kernel and libbpf version\n");
1511                 goto err_free_internals;
1512         }
1513 #endif
1514         internals->shared_umem = shared_umem;
1515
1516         if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1517                                   &internals->combined_queue_cnt)) {
1518                 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1519                                 if_name);
1520                 goto err_free_internals;
1521         }
1522
1523         if (queue_cnt > internals->combined_queue_cnt) {
1524                 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1525                                 queue_cnt, internals->combined_queue_cnt);
1526                 goto err_free_internals;
1527         }
1528
1529         internals->rx_queues = rte_zmalloc_socket(NULL,
1530                                         sizeof(struct pkt_rx_queue) * queue_cnt,
1531                                         0, numa_node);
1532         if (internals->rx_queues == NULL) {
1533                 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1534                 goto err_free_internals;
1535         }
1536
1537         internals->tx_queues = rte_zmalloc_socket(NULL,
1538                                         sizeof(struct pkt_tx_queue) * queue_cnt,
1539                                         0, numa_node);
1540         if (internals->tx_queues == NULL) {
1541                 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1542                 goto err_free_rx;
1543         }
1544         for (i = 0; i < queue_cnt; i++) {
1545                 internals->tx_queues[i].pair = &internals->rx_queues[i];
1546                 internals->rx_queues[i].pair = &internals->tx_queues[i];
1547                 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1548                 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1549         }
1550
1551         ret = get_iface_info(if_name, &internals->eth_addr,
1552                              &internals->if_index);
1553         if (ret)
1554                 goto err_free_tx;
1555
1556         eth_dev = rte_eth_vdev_allocate(dev, 0);
1557         if (eth_dev == NULL)
1558                 goto err_free_tx;
1559
1560         eth_dev->data->dev_private = internals;
1561         eth_dev->data->dev_link = pmd_link;
1562         eth_dev->data->mac_addrs = &internals->eth_addr;
1563         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1564         eth_dev->dev_ops = &ops;
1565         eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1566         eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1567
1568 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1569         AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1570 #endif
1571
1572         return eth_dev;
1573
1574 err_free_tx:
1575         rte_free(internals->tx_queues);
1576 err_free_rx:
1577         rte_free(internals->rx_queues);
1578 err_free_internals:
1579         rte_free(internals);
1580         return NULL;
1581 }
1582
1583 static int
1584 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1585 {
1586         struct rte_kvargs *kvlist;
1587         char if_name[IFNAMSIZ] = {'\0'};
1588         int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1589         int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1590         int shared_umem = 0;
1591         char prog_path[PATH_MAX] = {'\0'};
1592         struct rte_eth_dev *eth_dev = NULL;
1593         const char *name;
1594
1595         AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
1596                 rte_vdev_device_name(dev));
1597
1598         name = rte_vdev_device_name(dev);
1599         if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1600                 strlen(rte_vdev_device_args(dev)) == 0) {
1601                 eth_dev = rte_eth_dev_attach_secondary(name);
1602                 if (eth_dev == NULL) {
1603                         AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1604                         return -EINVAL;
1605                 }
1606                 eth_dev->dev_ops = &ops;
1607                 rte_eth_dev_probing_finish(eth_dev);
1608                 return 0;
1609         }
1610
1611         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1612         if (kvlist == NULL) {
1613                 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1614                 return -EINVAL;
1615         }
1616
1617         if (dev->device.numa_node == SOCKET_ID_ANY)
1618                 dev->device.numa_node = rte_socket_id();
1619
1620         if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1621                              &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
1622                 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1623                 return -EINVAL;
1624         }
1625
1626         if (strlen(if_name) == 0) {
1627                 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1628                 return -EINVAL;
1629         }
1630
1631         eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1632                                         xsk_queue_cnt, shared_umem, prog_path);
1633         if (eth_dev == NULL) {
1634                 AF_XDP_LOG(ERR, "Failed to init internals\n");
1635                 return -1;
1636         }
1637
1638         rte_eth_dev_probing_finish(eth_dev);
1639
1640         return 0;
1641 }
1642
1643 static int
1644 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
1645 {
1646         struct rte_eth_dev *eth_dev = NULL;
1647
1648         AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
1649                 rte_socket_id());
1650
1651         if (dev == NULL)
1652                 return -1;
1653
1654         /* find the ethdev entry */
1655         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1656         if (eth_dev == NULL)
1657                 return 0;
1658
1659         eth_dev_close(eth_dev);
1660         rte_eth_dev_release_port(eth_dev);
1661
1662
1663         return 0;
1664 }
1665
1666 static struct rte_vdev_driver pmd_af_xdp_drv = {
1667         .probe = rte_pmd_af_xdp_probe,
1668         .remove = rte_pmd_af_xdp_remove,
1669 };
1670
1671 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
1672 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
1673                               "iface=<string> "
1674                               "start_queue=<int> "
1675                               "queue_count=<int> "
1676                               "shared_umem=<int> "
1677                               "xdp_prog=<string> ");
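/*
 * Illustrative usage (assumed typical invocation; the interface name and
 * values are examples only, see the DPDK AF_XDP PMD documentation):
 *
 *   dpdk-testpmd --vdev net_af_xdp,iface=eth0,start_queue=0,queue_count=1 -- -i
 */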