net/af_xdp: use recvfrom instead of poll syscall
dpdk.git: drivers/net/af_xdp/rte_eth_af_xdp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Intel Corporation.
3  */
4 #include <unistd.h>
5 #include <errno.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <poll.h>
9 #include <netinet/in.h>
10 #include <net/if.h>
11 #include <sys/socket.h>
12 #include <sys/ioctl.h>
13 #include <linux/if_ether.h>
14 #include <linux/if_xdp.h>
15 #include <linux/if_link.h>
16 #include <linux/ethtool.h>
17 #include <linux/sockios.h>
18 #include "af_xdp_deps.h"
19 #include <bpf/xsk.h>
20
21 #include <rte_ethdev.h>
22 #include <ethdev_driver.h>
23 #include <ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_bus_vdev.h>
26 #include <rte_string_fns.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_dev.h>
30 #include <rte_eal.h>
31 #include <rte_ether.h>
32 #include <rte_lcore.h>
33 #include <rte_log.h>
34 #include <rte_memory.h>
35 #include <rte_memzone.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_malloc.h>
39 #include <rte_ring.h>
40 #include <rte_spinlock.h>
41
42 #include "compat.h"
43
44
45 #ifndef SOL_XDP
46 #define SOL_XDP 283
47 #endif
48
49 #ifndef AF_XDP
50 #define AF_XDP 44
51 #endif
52
53 #ifndef PF_XDP
54 #define PF_XDP AF_XDP
55 #endif
56
57 RTE_LOG_REGISTER(af_xdp_logtype, pmd.net.af_xdp, NOTICE);
58
59 #define AF_XDP_LOG(level, fmt, args...)                 \
60         rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
61                 "%s(): " fmt, __func__, ##args)
62
63 #define ETH_AF_XDP_FRAME_SIZE           2048
64 #define ETH_AF_XDP_NUM_BUFFERS          4096
65 #define ETH_AF_XDP_DFLT_NUM_DESCS       XSK_RING_CONS__DEFAULT_NUM_DESCS
66 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
67 #define ETH_AF_XDP_DFLT_QUEUE_COUNT     1
68
69 #define ETH_AF_XDP_RX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
70 #define ETH_AF_XDP_TX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
71
72
73 struct xsk_umem_info {
74         struct xsk_umem *umem;
75         struct rte_ring *buf_ring;
76         const struct rte_memzone *mz;
77         struct rte_mempool *mb_pool;
78         void *buffer;
79         uint8_t refcnt;
80         uint32_t max_xsks;
81 };
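/* Field usage note: buf_ring and mz back the copy-mode UMEM built when
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG is unavailable, while buffer and mb_pool
 * serve the zero-copy mode; refcnt and max_xsks track how many xsk sockets
 * currently share one UMEM.
 */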
82
83 struct rx_stats {
84         uint64_t rx_pkts;
85         uint64_t rx_bytes;
86         uint64_t rx_dropped;
87 };
88
89 struct pkt_rx_queue {
90         struct xsk_ring_cons rx;
91         struct xsk_umem_info *umem;
92         struct xsk_socket *xsk;
93         struct rte_mempool *mb_pool;
94
95         struct rx_stats stats;
96
97         struct xsk_ring_prod fq;
98         struct xsk_ring_cons cq;
99
100         struct pkt_tx_queue *pair;
101         struct pollfd fds[1];
102         int xsk_queue_idx;
103 };
104
105 struct tx_stats {
106         uint64_t tx_pkts;
107         uint64_t tx_bytes;
108         uint64_t tx_dropped;
109 };
110
111 struct pkt_tx_queue {
112         struct xsk_ring_prod tx;
113         struct xsk_umem_info *umem;
114
115         struct tx_stats stats;
116
117         struct pkt_rx_queue *pair;
118         int xsk_queue_idx;
119 };
120
121 struct pmd_internals {
122         int if_index;
123         char if_name[IFNAMSIZ];
124         int start_queue_idx;
125         int queue_cnt;
126         int max_queue_cnt;
127         int combined_queue_cnt;
128         bool shared_umem;
129         char prog_path[PATH_MAX];
130         bool custom_prog_configured;
131
132         struct rte_ether_addr eth_addr;
133
134         struct pkt_rx_queue *rx_queues;
135         struct pkt_tx_queue *tx_queues;
136 };
137
138 #define ETH_AF_XDP_IFACE_ARG                    "iface"
139 #define ETH_AF_XDP_START_QUEUE_ARG              "start_queue"
140 #define ETH_AF_XDP_QUEUE_COUNT_ARG              "queue_count"
141 #define ETH_AF_XDP_SHARED_UMEM_ARG              "shared_umem"
142 #define ETH_AF_XDP_PROG_ARG                     "xdp_prog"
143
144 static const char * const valid_arguments[] = {
145         ETH_AF_XDP_IFACE_ARG,
146         ETH_AF_XDP_START_QUEUE_ARG,
147         ETH_AF_XDP_QUEUE_COUNT_ARG,
148         ETH_AF_XDP_SHARED_UMEM_ARG,
149         ETH_AF_XDP_PROG_ARG,
150         NULL
151 };
152
153 static const struct rte_eth_link pmd_link = {
154         .link_speed = ETH_SPEED_NUM_10G,
155         .link_duplex = ETH_LINK_FULL_DUPLEX,
156         .link_status = ETH_LINK_DOWN,
157         .link_autoneg = ETH_LINK_AUTONEG
158 };
159
160 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
161 struct internal_list {
162         TAILQ_ENTRY(internal_list) next;
163         struct rte_eth_dev *eth_dev;
164 };
165
166 TAILQ_HEAD(internal_list_head, internal_list);
167 static struct internal_list_head internal_list =
168         TAILQ_HEAD_INITIALIZER(internal_list);
169
170 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
171
172 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
173 static inline int
174 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
175                       struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
176 {
177         uint32_t idx;
178         uint16_t i;
179
180         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
181                 for (i = 0; i < reserve_size; i++)
182                         rte_pktmbuf_free(bufs[i]);
183                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
184                 return -1;
185         }
186
187         for (i = 0; i < reserve_size; i++) {
188                 __u64 *fq_addr;
189                 uint64_t addr;
190
191                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
192                 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
193                                 umem->mb_pool->header_size;
194                 *fq_addr = addr;
195         }
196
197         xsk_ring_prod__submit(fq, reserve_size);
198
199         return 0;
200 }
201 #else
202 static inline int
203 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
204                       struct rte_mbuf **bufs __rte_unused,
205                       struct xsk_ring_prod *fq)
206 {
207         void *addrs[reserve_size];
208         uint32_t idx;
209         uint16_t i;
210
211         if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
212                     != reserve_size) {
213                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
214                 return -1;
215         }
216
217         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
218                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
219                 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
220                                 reserve_size, NULL);
221                 return -1;
222         }
223
224         for (i = 0; i < reserve_size; i++) {
225                 __u64 *fq_addr;
226
227                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
228                 *fq_addr = (uint64_t)addrs[i];
229         }
230
231         xsk_ring_prod__submit(fq, reserve_size);
232
233         return 0;
234 }
235 #endif
236
237 static inline int
238 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
239                    struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
240 {
241 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
242         return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
243 #else
244         return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
245 #endif
246 }
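/* Note: with XDP_UMEM_UNALIGNED_CHUNK_FLAG available, the zero-copy (_zc)
 * variant above refills the fill queue with addresses of mbufs drawn from
 * the UMEM-backed mempool; otherwise the copy (_cp) variant hands back
 * fixed-size frame offsets dequeued from umem->buf_ring.
 */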
247
248 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
249 static uint16_t
250 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
251 {
252         struct pkt_rx_queue *rxq = queue;
253         struct xsk_ring_cons *rx = &rxq->rx;
254         struct xsk_ring_prod *fq = &rxq->fq;
255         struct xsk_umem_info *umem = rxq->umem;
256         uint32_t idx_rx = 0;
257         unsigned long rx_bytes = 0;
258         int i;
259         struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
260
261         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
262
263         if (nb_pkts == 0) {
264 #if defined(XDP_USE_NEED_WAKEUP)
265                 if (xsk_ring_prod__needs_wakeup(fq))
266                         recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
267                                 MSG_DONTWAIT, NULL, NULL);
268 #endif
269
270                 return 0;
271         }
272
273         /* allocate bufs for fill queue replenishment after rx */
274         if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
275                 AF_XDP_LOG(DEBUG,
276                         "Failed to get enough buffers for fq.\n");
277                 /* roll back cached_cons, which xsk_ring_cons__peek
278                  * advanced above
279                  */
280                 rx->cached_cons -= nb_pkts;
281                 return 0;
282         }
283
284         for (i = 0; i < nb_pkts; i++) {
285                 const struct xdp_desc *desc;
286                 uint64_t addr;
287                 uint32_t len;
288                 uint64_t offset;
289
290                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
291                 addr = desc->addr;
292                 len = desc->len;
293
294                 offset = xsk_umem__extract_offset(addr);
295                 addr = xsk_umem__extract_addr(addr);
296
297                 bufs[i] = (struct rte_mbuf *)
298                                 xsk_umem__get_data(umem->buffer, addr +
299                                         umem->mb_pool->header_size);
300                 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
301                         rte_pktmbuf_priv_size(umem->mb_pool) -
302                         umem->mb_pool->header_size;
303
304                 rte_pktmbuf_pkt_len(bufs[i]) = len;
305                 rte_pktmbuf_data_len(bufs[i]) = len;
306                 rx_bytes += len;
307         }
308
309         xsk_ring_cons__release(rx, nb_pkts);
310         (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
311
312         /* statistics */
313         rxq->stats.rx_pkts += nb_pkts;
314         rxq->stats.rx_bytes += rx_bytes;
315
316         return nb_pkts;
317 }
318 #else
319 static uint16_t
320 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
321 {
322         struct pkt_rx_queue *rxq = queue;
323         struct xsk_ring_cons *rx = &rxq->rx;
324         struct xsk_umem_info *umem = rxq->umem;
325         struct xsk_ring_prod *fq = &rxq->fq;
326         uint32_t idx_rx = 0;
327         unsigned long rx_bytes = 0;
328         int i;
329         uint32_t free_thresh = fq->size >> 1;
330         struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
331
332         if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
333                 (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
334
335         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
336         if (nb_pkts == 0) {
337 #if defined(XDP_USE_NEED_WAKEUP)
338                 if (xsk_ring_prod__needs_wakeup(fq))
339                         recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
340                                 MSG_DONTWAIT, NULL, NULL);
341 #endif
342                 return 0;
343         }
344
345         if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
346                 /* roll back cached_cons, which xsk_ring_cons__peek
347                  * advanced above
348                  */
349                 rx->cached_cons -= nb_pkts;
350                 return 0;
351         }
352
353         for (i = 0; i < nb_pkts; i++) {
354                 const struct xdp_desc *desc;
355                 uint64_t addr;
356                 uint32_t len;
357                 void *pkt;
358
359                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
360                 addr = desc->addr;
361                 len = desc->len;
362                 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
363
364                 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
365                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
366                 rte_pktmbuf_pkt_len(mbufs[i]) = len;
367                 rte_pktmbuf_data_len(mbufs[i]) = len;
368                 rx_bytes += len;
369                 bufs[i] = mbufs[i];
370         }
371
372         xsk_ring_cons__release(rx, nb_pkts);
373
374         /* statistics */
375         rxq->stats.rx_pkts += nb_pkts;
376         rxq->stats.rx_bytes += rx_bytes;
377
378         return nb_pkts;
379 }
380 #endif
381
382 static uint16_t
383 af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
384 {
385 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
386         return af_xdp_rx_zc(queue, bufs, nb_pkts);
387 #else
388         return af_xdp_rx_cp(queue, bufs, nb_pkts);
389 #endif
390 }
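/* Wakeup note: when a receive poll finds no packets and the fill queue is
 * flagged as needing a wakeup, both rx variants above issue a non-blocking
 *
 *   recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
 *
 * rather than a poll() syscall, which is the change referred to in the
 * commit title.
 */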
391
392 static uint16_t
393 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
394 {
395         uint16_t nb_rx;
396
397         if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
398                 return af_xdp_rx(queue, bufs, nb_pkts);
399
400         /* Split larger batch into smaller batches of size
401          * ETH_AF_XDP_RX_BATCH_SIZE or less.
402          */
403         nb_rx = 0;
404         while (nb_pkts) {
405                 uint16_t ret, n;
406
407                 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
408                 ret = af_xdp_rx(queue, &bufs[nb_rx], n);
409                 nb_rx = (uint16_t)(nb_rx + ret);
410                 nb_pkts = (uint16_t)(nb_pkts - ret);
411                 if (ret < n)
412                         break;
413         }
414
415         return nb_rx;
416 }
417
418 static void
419 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
420 {
421         size_t i, n;
422         uint32_t idx_cq = 0;
423
424         n = xsk_ring_cons__peek(cq, size, &idx_cq);
425
426         for (i = 0; i < n; i++) {
427                 uint64_t addr;
428                 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
429 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
430                 addr = xsk_umem__extract_addr(addr);
431                 rte_pktmbuf_free((struct rte_mbuf *)
432                                         xsk_umem__get_data(umem->buffer,
433                                         addr + umem->mb_pool->header_size));
434 #else
435                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
436 #endif
437         }
438
439         xsk_ring_cons__release(cq, n);
440 }
441
442 static void
443 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
444 {
445         struct xsk_umem_info *umem = txq->umem;
446
447         pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
448
449 #if defined(XDP_USE_NEED_WAKEUP)
450         if (xsk_ring_prod__needs_wakeup(&txq->tx))
451 #endif
452                 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
453                             0, MSG_DONTWAIT) < 0) {
454                         /* something unexpected */
455                         if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
456                                 break;
457
458                         /* pull from completion queue to leave more space */
459                         if (errno == EAGAIN)
460                                 pull_umem_cq(umem,
461                                              XSK_RING_CONS__DEFAULT_NUM_DESCS,
462                                              cq);
463                 }
464 }
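/* kick_tx() wakes the kernel to process the TX ring with a zero-length
 * send(..., MSG_DONTWAIT); EBUSY, EAGAIN and EINTR are treated as transient,
 * the completion queue is drained on EAGAIN to free up space, and any other
 * errno aborts the kick.
 */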
465
466 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
467 static uint16_t
468 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
469 {
470         struct pkt_tx_queue *txq = queue;
471         struct xsk_umem_info *umem = txq->umem;
472         struct rte_mbuf *mbuf;
473         unsigned long tx_bytes = 0;
474         int i;
475         uint32_t idx_tx;
476         uint16_t count = 0;
477         struct xdp_desc *desc;
478         uint64_t addr, offset;
479         struct xsk_ring_cons *cq = &txq->pair->cq;
480         uint32_t free_thresh = cq->size >> 1;
481
482         if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
483                 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
484
485         for (i = 0; i < nb_pkts; i++) {
486                 mbuf = bufs[i];
487
488                 if (mbuf->pool == umem->mb_pool) {
489                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
490                                 kick_tx(txq, cq);
491                                 if (!xsk_ring_prod__reserve(&txq->tx, 1,
492                                                             &idx_tx))
493                                         goto out;
494                         }
495                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
496                         desc->len = mbuf->pkt_len;
497                         addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
498                                         umem->mb_pool->header_size;
499                         offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
500                                         (uint64_t)mbuf +
501                                         umem->mb_pool->header_size;
502                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
503                         desc->addr = addr | offset;
504                         count++;
505                 } else {
506                         struct rte_mbuf *local_mbuf =
507                                         rte_pktmbuf_alloc(umem->mb_pool);
508                         void *pkt;
509
510                         if (local_mbuf == NULL)
511                                 goto out;
512
513                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
514                                 rte_pktmbuf_free(local_mbuf);
515                                 kick_tx(txq, cq);
516                                 goto out;
517                         }
518
519                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
520                         desc->len = mbuf->pkt_len;
521
522                         addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
523                                         umem->mb_pool->header_size;
524                         offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
525                                         (uint64_t)local_mbuf +
526                                         umem->mb_pool->header_size;
527                         pkt = xsk_umem__get_data(umem->buffer, addr + offset);
528                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
529                         desc->addr = addr | offset;
530                         rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
531                                         desc->len);
532                         rte_pktmbuf_free(mbuf);
533                         count++;
534                 }
535
536                 tx_bytes += mbuf->pkt_len;
537         }
538
539         kick_tx(txq, cq);
540
541 out:
542         xsk_ring_prod__submit(&txq->tx, count);
543
544         txq->stats.tx_pkts += count;
545         txq->stats.tx_bytes += tx_bytes;
546         txq->stats.tx_dropped += nb_pkts - count;
547
548         return count;
549 }
550 #else
551 static uint16_t
552 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
553 {
554         struct pkt_tx_queue *txq = queue;
555         struct xsk_umem_info *umem = txq->umem;
556         struct rte_mbuf *mbuf;
557         void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
558         unsigned long tx_bytes = 0;
559         int i;
560         uint32_t idx_tx;
561         struct xsk_ring_cons *cq = &txq->pair->cq;
562
563         pull_umem_cq(umem, nb_pkts, cq);
564
565         nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
566                                         nb_pkts, NULL);
567         if (nb_pkts == 0)
568                 return 0;
569
570         if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
571                 kick_tx(txq, cq);
572                 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
573                 return 0;
574         }
575
576         for (i = 0; i < nb_pkts; i++) {
577                 struct xdp_desc *desc;
578                 void *pkt;
579
580                 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
581                 mbuf = bufs[i];
582                 desc->len = mbuf->pkt_len;
583
584                 desc->addr = (uint64_t)addrs[i];
585                 pkt = xsk_umem__get_data(umem->mz->addr,
586                                          desc->addr);
587                 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
588                 tx_bytes += mbuf->pkt_len;
589                 rte_pktmbuf_free(mbuf);
590         }
591
592         xsk_ring_prod__submit(&txq->tx, nb_pkts);
593
594         kick_tx(txq, cq);
595
596         txq->stats.tx_pkts += nb_pkts;
597         txq->stats.tx_bytes += tx_bytes;
598
599         return nb_pkts;
600 }
601
602 static uint16_t
603 af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
604 {
605         uint16_t nb_tx;
606
607         if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
608                 return af_xdp_tx_cp(queue, bufs, nb_pkts);
609
610         nb_tx = 0;
611         while (nb_pkts) {
612                 uint16_t ret, n;
613
614                 /* Split larger batch into smaller batches of size
615                  * ETH_AF_XDP_TX_BATCH_SIZE or less.
616                  */
617                 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
618                 ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
619                 nb_tx = (uint16_t)(nb_tx + ret);
620                 nb_pkts = (uint16_t)(nb_pkts - ret);
621                 if (ret < n)
622                         break;
623         }
624
625         return nb_tx;
626 }
627 #endif
628
629 static uint16_t
630 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
631 {
632 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
633         return af_xdp_tx_zc(queue, bufs, nb_pkts);
634 #else
635         return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
636 #endif
637 }
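/* In the zero-copy TX variant the descriptor address packs the frame base
 * within the UMEM together with the data offset shifted by
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT (desc->addr = addr | offset), as the
 * unaligned-chunk mode expects; the copy variant instead memcpy()s each mbuf
 * into a UMEM frame taken from umem->buf_ring.
 */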
638
639 static int
640 eth_dev_start(struct rte_eth_dev *dev)
641 {
642         dev->data->dev_link.link_status = ETH_LINK_UP;
643
644         return 0;
645 }
646
647 /* This function gets called when the current port gets stopped. */
648 static int
649 eth_dev_stop(struct rte_eth_dev *dev)
650 {
651         dev->data->dev_link.link_status = ETH_LINK_DOWN;
652         return 0;
653 }
654
655 /* Find ethdev in list */
656 static inline struct internal_list *
657 find_internal_resource(struct pmd_internals *port_int)
658 {
659         int found = 0;
660         struct internal_list *list = NULL;
661
662         if (port_int == NULL)
663                 return NULL;
664
665         pthread_mutex_lock(&internal_list_lock);
666
667         TAILQ_FOREACH(list, &internal_list, next) {
668                 struct pmd_internals *list_int =
669                                 list->eth_dev->data->dev_private;
670                 if (list_int == port_int) {
671                         found = 1;
672                         break;
673                 }
674         }
675
676         pthread_mutex_unlock(&internal_list_lock);
677
678         if (!found)
679                 return NULL;
680
681         return list;
682 }
683
684 /* Check if the netdev,qid context already exists */
685 static inline bool
686 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
687                 struct pkt_rx_queue *list_rxq, const char *list_ifname)
688 {
689         bool exists = false;
690
691         if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
692                         !strncmp(ifname, list_ifname, IFNAMSIZ)) {
693                 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
694                                         ifname, rxq->xsk_queue_idx);
695                 exists = true;
696         }
697
698         return exists;
699 }
700
701 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
702 static inline int
703 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
704                         struct xsk_umem_info **umem)
705 {
706         struct internal_list *list;
707         struct pmd_internals *internals;
708         int i = 0, ret = 0;
709         struct rte_mempool *mb_pool = rxq->mb_pool;
710
711         if (mb_pool == NULL)
712                 return ret;
713
714         pthread_mutex_lock(&internal_list_lock);
715
716         TAILQ_FOREACH(list, &internal_list, next) {
717                 internals = list->eth_dev->data->dev_private;
718                 for (i = 0; i < internals->queue_cnt; i++) {
719                         struct pkt_rx_queue *list_rxq =
720                                                 &internals->rx_queues[i];
721                         if (rxq == list_rxq)
722                                 continue;
723                         if (mb_pool == internals->rx_queues[i].mb_pool) {
724                                 if (ctx_exists(rxq, ifname, list_rxq,
725                                                 internals->if_name)) {
726                                         ret = -1;
727                                         goto out;
728                                 }
729                                 if (__atomic_load_n(
730                                         &internals->rx_queues[i].umem->refcnt,
731                                                         __ATOMIC_ACQUIRE)) {
732                                         *umem = internals->rx_queues[i].umem;
733                                         goto out;
734                                 }
735                         }
736                 }
737         }
738
739 out:
740         pthread_mutex_unlock(&internal_list_lock);
741
742         return ret;
743 }
744
745 static int
746 eth_dev_configure(struct rte_eth_dev *dev)
747 {
748         struct pmd_internals *internal = dev->data->dev_private;
749
750         /* rx/tx must be paired */
751         if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
752                 return -EINVAL;
753
754         if (internal->shared_umem) {
755                 struct internal_list *list = NULL;
756                 const char *name = dev->device->name;
757
758                 /* Ensure PMD is not already inserted into the list */
759                 list = find_internal_resource(internal);
760                 if (list)
761                         return 0;
762
763                 list = rte_zmalloc_socket(name, sizeof(*list), 0,
764                                         dev->device->numa_node);
765                 if (list == NULL)
766                         return -1;
767
768                 list->eth_dev = dev;
769                 pthread_mutex_lock(&internal_list_lock);
770                 TAILQ_INSERT_TAIL(&internal_list, list, next);
771                 pthread_mutex_unlock(&internal_list_lock);
772         }
773
774         return 0;
775 }
776
777 static int
778 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
779 {
780         struct pmd_internals *internals = dev->data->dev_private;
781
782         dev_info->if_index = internals->if_index;
783         dev_info->max_mac_addrs = 1;
784         dev_info->max_rx_pktlen = ETH_FRAME_LEN;
785         dev_info->max_rx_queues = internals->queue_cnt;
786         dev_info->max_tx_queues = internals->queue_cnt;
787
788         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
789 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
790         dev_info->max_mtu = getpagesize() -
791                                 sizeof(struct rte_mempool_objhdr) -
792                                 sizeof(struct rte_mbuf) -
793                                 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
794 #else
795         dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
796 #endif
797
798         dev_info->default_rxportconf.nb_queues = 1;
799         dev_info->default_txportconf.nb_queues = 1;
800         dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
801         dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
802
803         return 0;
804 }
805
806 static int
807 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
808 {
809         struct pmd_internals *internals = dev->data->dev_private;
810         struct xdp_statistics xdp_stats;
811         struct pkt_rx_queue *rxq;
812         struct pkt_tx_queue *txq;
813         socklen_t optlen;
814         int i, ret;
815
816         for (i = 0; i < dev->data->nb_rx_queues; i++) {
817                 optlen = sizeof(struct xdp_statistics);
818                 rxq = &internals->rx_queues[i];
819                 txq = rxq->pair;
820                 stats->q_ipackets[i] = rxq->stats.rx_pkts;
821                 stats->q_ibytes[i] = rxq->stats.rx_bytes;
822
823                 stats->q_opackets[i] = txq->stats.tx_pkts;
824                 stats->q_obytes[i] = txq->stats.tx_bytes;
825
826                 stats->ipackets += stats->q_ipackets[i];
827                 stats->ibytes += stats->q_ibytes[i];
828                 stats->imissed += rxq->stats.rx_dropped;
829                 stats->oerrors += txq->stats.tx_dropped;
830                 ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
831                                 XDP_STATISTICS, &xdp_stats, &optlen);
832                 if (ret != 0) {
833                         AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
834                         return -1;
835                 }
836                 stats->imissed += xdp_stats.rx_dropped;
837
838                 stats->opackets += stats->q_opackets[i];
839                 stats->obytes += stats->q_obytes[i];
840         }
841
842         return 0;
843 }
844
845 static int
846 eth_stats_reset(struct rte_eth_dev *dev)
847 {
848         struct pmd_internals *internals = dev->data->dev_private;
849         int i;
850
851         for (i = 0; i < internals->queue_cnt; i++) {
852                 memset(&internals->rx_queues[i].stats, 0,
853                                         sizeof(struct rx_stats));
854                 memset(&internals->tx_queues[i].stats, 0,
855                                         sizeof(struct tx_stats));
856         }
857
858         return 0;
859 }
860
861 static void
862 remove_xdp_program(struct pmd_internals *internals)
863 {
864         uint32_t curr_prog_id = 0;
865
866         if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
867                                 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
868                 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
869                 return;
870         }
871         bpf_set_link_xdp_fd(internals->if_index, -1,
872                         XDP_FLAGS_UPDATE_IF_NOEXIST);
873 }
874
875 static void
876 xdp_umem_destroy(struct xsk_umem_info *umem)
877 {
878 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
879         umem->mb_pool = NULL;
880 #else
881         rte_memzone_free(umem->mz);
882         umem->mz = NULL;
883
884         rte_ring_free(umem->buf_ring);
885         umem->buf_ring = NULL;
886 #endif
887
888         rte_free(umem);
889 }
890
891 static int
892 eth_dev_close(struct rte_eth_dev *dev)
893 {
894         struct pmd_internals *internals = dev->data->dev_private;
895         struct pkt_rx_queue *rxq;
896         int i;
897
898         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
899                 return 0;
900
901         AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
902                 rte_socket_id());
903
904         for (i = 0; i < internals->queue_cnt; i++) {
905                 rxq = &internals->rx_queues[i];
906                 if (rxq->umem == NULL)
907                         break;
908                 xsk_socket__delete(rxq->xsk);
909
910                 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
911                                 == 0) {
912                         (void)xsk_umem__delete(rxq->umem->umem);
913                         xdp_umem_destroy(rxq->umem);
914                 }
915
916                 /* free pkt_tx_queue */
917                 rte_free(rxq->pair);
918                 rte_free(rxq);
919         }
920
921         /*
922          * The MAC address is not allocated dynamically; set it to NULL so
923          * that rte_eth_dev_release_port does not attempt to free it.
924          */
925         dev->data->mac_addrs = NULL;
926
927         remove_xdp_program(internals);
928
929         if (internals->shared_umem) {
930                 struct internal_list *list;
931
932                 /* Remove ethdev from list used to track and share UMEMs */
933                 list = find_internal_resource(internals);
934                 if (list) {
935                         pthread_mutex_lock(&internal_list_lock);
936                         TAILQ_REMOVE(&internal_list, list, next);
937                         pthread_mutex_unlock(&internal_list_lock);
938                         rte_free(list);
939                 }
940         }
941
942         return 0;
943 }
944
945 static void
946 eth_queue_release(void *q __rte_unused)
947 {
948 }
949
950 static int
951 eth_link_update(struct rte_eth_dev *dev __rte_unused,
952                 int wait_to_complete __rte_unused)
953 {
954         return 0;
955 }
956
957 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
958 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
959 {
960         struct rte_mempool_memhdr *memhdr;
961         uintptr_t memhdr_addr, aligned_addr;
962
963         memhdr = STAILQ_FIRST(&mp->mem_list);
964         memhdr_addr = (uintptr_t)memhdr->addr;
965         aligned_addr = memhdr_addr & ~(getpagesize() - 1);
966         *align = memhdr_addr - aligned_addr;
967
968         return aligned_addr;
969 }
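/* get_base_addr() rounds the mempool's first memory chunk down to a page
 * boundary, presumably because the UMEM base registered with
 * xsk_umem__create() must be page aligned; the trimmed bytes are reported
 * through *align and added back to the UMEM size below.
 */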
970
971 static struct
972 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
973                                   struct pkt_rx_queue *rxq)
974 {
975         struct xsk_umem_info *umem = NULL;
976         int ret;
977         struct xsk_umem_config usr_config = {
978                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
979                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
980                 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
981         void *base_addr = NULL;
982         struct rte_mempool *mb_pool = rxq->mb_pool;
983         uint64_t umem_size, align = 0;
984
985         if (internals->shared_umem) {
986                 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
987                         return NULL;
988
989                 if (umem != NULL &&
990                         __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
991                                         umem->max_xsks) {
992                         AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
993                                         internals->if_name, rxq->xsk_queue_idx);
994                         __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
995                 }
996         }
997
998         if (umem == NULL) {
999                 usr_config.frame_size =
1000                         rte_mempool_calc_obj_size(mb_pool->elt_size,
1001                                                   mb_pool->flags, NULL);
1002                 usr_config.frame_headroom = mb_pool->header_size +
1003                                                 sizeof(struct rte_mbuf) +
1004                                                 rte_pktmbuf_priv_size(mb_pool) +
1005                                                 RTE_PKTMBUF_HEADROOM;
1006
1007                 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
1008                                           rte_socket_id());
1009                 if (umem == NULL) {
1010                         AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1011                         return NULL;
1012                 }
1013
1014                 umem->mb_pool = mb_pool;
1015                 base_addr = (void *)get_base_addr(mb_pool, &align);
1016                 umem_size = (uint64_t)mb_pool->populated_size *
1017                                 (uint64_t)usr_config.frame_size +
1018                                 align;
1019
1020                 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
1021                                 &rxq->fq, &rxq->cq, &usr_config);
1022                 if (ret) {
1023                         AF_XDP_LOG(ERR, "Failed to create umem\n");
1024                         goto err;
1025                 }
1026                 umem->buffer = base_addr;
1027
1028                 if (internals->shared_umem) {
1029                         umem->max_xsks = mb_pool->populated_size /
1030                                                 ETH_AF_XDP_NUM_BUFFERS;
1031                         AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
1032                                                 mb_pool->name, umem->max_xsks);
1033                 }
1034
1035                 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
1036         }
1037
1038 #else
1039 static struct
1040 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1041                                   struct pkt_rx_queue *rxq)
1042 {
1043         struct xsk_umem_info *umem;
1044         const struct rte_memzone *mz;
1045         struct xsk_umem_config usr_config = {
1046                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1047                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1048                 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1049                 .frame_headroom = 0 };
1050         char ring_name[RTE_RING_NAMESIZE];
1051         char mz_name[RTE_MEMZONE_NAMESIZE];
1052         int ret;
1053         uint64_t i;
1054
1055         umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1056         if (umem == NULL) {
1057                 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1058                 return NULL;
1059         }
1060
1061         snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1062                        internals->if_name, rxq->xsk_queue_idx);
1063         umem->buf_ring = rte_ring_create(ring_name,
1064                                          ETH_AF_XDP_NUM_BUFFERS,
1065                                          rte_socket_id(),
1066                                          0x0);
1067         if (umem->buf_ring == NULL) {
1068                 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1069                 goto err;
1070         }
1071
1072         for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1073                 rte_ring_enqueue(umem->buf_ring,
1074                                  (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1075
1076         snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1077                        internals->if_name, rxq->xsk_queue_idx);
1078         mz = rte_memzone_reserve_aligned(mz_name,
1079                         ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1080                         rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1081                         getpagesize());
1082         if (mz == NULL) {
1083                 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1084                 goto err;
1085         }
1086
1087         ret = xsk_umem__create(&umem->umem, mz->addr,
1088                                ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1089                                &rxq->fq, &rxq->cq,
1090                                &usr_config);
1091
1092         if (ret) {
1093                 AF_XDP_LOG(ERR, "Failed to create umem\n");
1094                 goto err;
1095         }
1096         umem->mz = mz;
1097
1098 #endif
1099         return umem;
1100
1101 err:
1102         xdp_umem_destroy(umem);
1103         return NULL;
1104 }
1105
1106 static int
1107 load_custom_xdp_prog(const char *prog_path, int if_index)
1108 {
1109         int ret, prog_fd = -1;
1110         struct bpf_object *obj;
1111         struct bpf_map *map;
1112
1113         ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
1114         if (ret) {
1115                 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1116                 return ret;
1117         }
1118
1119         /*
1120          * The loaded program must provision for a map of xsks, such that some
1121          * traffic can be redirected to userspace. When the xsk is created,
1122          * libbpf inserts it into the map.
1123          */
1124         map = bpf_object__find_map_by_name(obj, "xsks_map");
1125         if (!map) {
1126                 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1127                 return -1;
1128         }
1129
1130         /* Link the program with the given network device */
1131         ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1132                                         XDP_FLAGS_UPDATE_IF_NOEXIST);
1133         if (ret) {
1134                 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1135                                 prog_fd);
1136                 return -1;
1137         }
1138
1139         AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1140                                 prog_path, prog_fd);
1141
1142         return 0;
1143 }
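/* A minimal sketch (an assumption, not taken from this driver) of the map a
 * custom XDP program is expected to define so that libbpf can insert the
 * created xsk into it; the map must be named "xsks_map" as looked up above:
 *
 *   struct bpf_map_def SEC("maps") xsks_map = {
 *           .type = BPF_MAP_TYPE_XSKMAP,
 *           .key_size = sizeof(int),
 *           .value_size = sizeof(int),
 *           .max_entries = 64,
 *   };
 */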
1144
1145 static int
1146 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1147               int ring_size)
1148 {
1149         struct xsk_socket_config cfg;
1150         struct pkt_tx_queue *txq = rxq->pair;
1151         int ret = 0;
1152         int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1153         struct rte_mbuf *fq_bufs[reserve_size];
1154
1155         rxq->umem = xdp_umem_configure(internals, rxq);
1156         if (rxq->umem == NULL)
1157                 return -ENOMEM;
1158         txq->umem = rxq->umem;
1159
1160         cfg.rx_size = ring_size;
1161         cfg.tx_size = ring_size;
1162         cfg.libbpf_flags = 0;
1163         cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1164         cfg.bind_flags = 0;
1165
1166 #if defined(XDP_USE_NEED_WAKEUP)
1167         cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1168 #endif
1169
1170         if (strnlen(internals->prog_path, PATH_MAX) &&
1171                                 !internals->custom_prog_configured) {
1172                 ret = load_custom_xdp_prog(internals->prog_path,
1173                                            internals->if_index);
1174                 if (ret) {
1175                         AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1176                                         internals->prog_path);
1177                         goto err;
1178                 }
1179                 internals->custom_prog_configured = 1;
1180         }
1181
1182         if (internals->shared_umem)
1183                 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1184                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1185                                 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1186         else
1187                 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1188                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1189                                 &txq->tx, &cfg);
1190
1191         if (ret) {
1192                 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1193                 goto err;
1194         }
1195
1196 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1197         ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1198         if (ret) {
1199                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1200                 goto err;
1201         }
1202 #endif
1203         ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1204         if (ret) {
1205                 xsk_socket__delete(rxq->xsk);
1206                 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1207                 goto err;
1208         }
1209
1210         return 0;
1211
1212 err:
1213         if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1214                 xdp_umem_destroy(rxq->umem);
1215
1216         return ret;
1217 }
1218
1219 static int
1220 eth_rx_queue_setup(struct rte_eth_dev *dev,
1221                    uint16_t rx_queue_id,
1222                    uint16_t nb_rx_desc,
1223                    unsigned int socket_id __rte_unused,
1224                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1225                    struct rte_mempool *mb_pool)
1226 {
1227         struct pmd_internals *internals = dev->data->dev_private;
1228         struct pkt_rx_queue *rxq;
1229         int ret;
1230
1231         rxq = &internals->rx_queues[rx_queue_id];
1232
1233         AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1234                    rx_queue_id, rxq->xsk_queue_idx);
1235
1236 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1237         uint32_t buf_size, data_size;
1238
1239         /* Now get the space available for data in the mbuf */
1240         buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1241                 RTE_PKTMBUF_HEADROOM;
1242         data_size = ETH_AF_XDP_FRAME_SIZE;
1243
1244         if (data_size > buf_size) {
1245                 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1246                         dev->device->name, data_size, buf_size);
1247                 ret = -ENOMEM;
1248                 goto err;
1249         }
1250 #endif
1251
1252         rxq->mb_pool = mb_pool;
1253
1254         if (xsk_configure(internals, rxq, nb_rx_desc)) {
1255                 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1256                 ret = -EINVAL;
1257                 goto err;
1258         }
1259
1260         rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1261         rxq->fds[0].events = POLLIN;
1262
1263         dev->data->rx_queues[rx_queue_id] = rxq;
1264         return 0;
1265
1266 err:
1267         return ret;
1268 }
1269
1270 static int
1271 eth_tx_queue_setup(struct rte_eth_dev *dev,
1272                    uint16_t tx_queue_id,
1273                    uint16_t nb_tx_desc __rte_unused,
1274                    unsigned int socket_id __rte_unused,
1275                    const struct rte_eth_txconf *tx_conf __rte_unused)
1276 {
1277         struct pmd_internals *internals = dev->data->dev_private;
1278         struct pkt_tx_queue *txq;
1279
1280         txq = &internals->tx_queues[tx_queue_id];
1281
1282         dev->data->tx_queues[tx_queue_id] = txq;
1283         return 0;
1284 }
1285
1286 static int
1287 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1288 {
1289         struct pmd_internals *internals = dev->data->dev_private;
1290         struct ifreq ifr = { .ifr_mtu = mtu };
1291         int ret;
1292         int s;
1293
1294         s = socket(PF_INET, SOCK_DGRAM, 0);
1295         if (s < 0)
1296                 return -EINVAL;
1297
1298         strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1299         ret = ioctl(s, SIOCSIFMTU, &ifr);
1300         close(s);
1301
1302         return (ret < 0) ? -errno : 0;
1303 }
1304
1305 static int
1306 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1307 {
1308         struct ifreq ifr;
1309         int ret = 0;
1310         int s;
1311
1312         s = socket(PF_INET, SOCK_DGRAM, 0);
1313         if (s < 0)
1314                 return -errno;
1315
1316         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1317         if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1318                 ret = -errno;
1319                 goto out;
1320         }
1321         ifr.ifr_flags &= mask;
1322         ifr.ifr_flags |= flags;
1323         if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1324                 ret = -errno;
1325                 goto out;
1326         }
1327 out:
1328         close(s);
1329         return ret;
1330 }
1331
1332 static int
1333 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1334 {
1335         struct pmd_internals *internals = dev->data->dev_private;
1336
1337         return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1338 }
1339
1340 static int
1341 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1342 {
1343         struct pmd_internals *internals = dev->data->dev_private;
1344
1345         return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1346 }
1347
1348 static const struct eth_dev_ops ops = {
1349         .dev_start = eth_dev_start,
1350         .dev_stop = eth_dev_stop,
1351         .dev_close = eth_dev_close,
1352         .dev_configure = eth_dev_configure,
1353         .dev_infos_get = eth_dev_info,
1354         .mtu_set = eth_dev_mtu_set,
1355         .promiscuous_enable = eth_dev_promiscuous_enable,
1356         .promiscuous_disable = eth_dev_promiscuous_disable,
1357         .rx_queue_setup = eth_rx_queue_setup,
1358         .tx_queue_setup = eth_tx_queue_setup,
1359         .rx_queue_release = eth_queue_release,
1360         .tx_queue_release = eth_queue_release,
1361         .link_update = eth_link_update,
1362         .stats_get = eth_stats_get,
1363         .stats_reset = eth_stats_reset,
1364 };
1365
1366 /** parse integer argument */
1367 static int
1368 parse_integer_arg(const char *key __rte_unused,
1369                   const char *value, void *extra_args)
1370 {
1371         int *i = (int *)extra_args;
1372         char *end;
1373
1374         *i = strtol(value, &end, 10);
1375         if (*i < 0) {
1376                 AF_XDP_LOG(ERR, "Argument has to be positive.\n");
1377                 return -EINVAL;
1378         }
1379
1380         return 0;
1381 }
1382
1383 /** parse name argument */
1384 static int
1385 parse_name_arg(const char *key __rte_unused,
1386                const char *value, void *extra_args)
1387 {
1388         char *name = extra_args;
1389
1390         if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1391                 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1392                            value, IFNAMSIZ);
1393                 return -EINVAL;
1394         }
1395
1396         strlcpy(name, value, IFNAMSIZ);
1397
1398         return 0;
1399 }
1400
1401 /** parse xdp prog argument */
1402 static int
1403 parse_prog_arg(const char *key __rte_unused,
1404                const char *value, void *extra_args)
1405 {
1406         char *path = extra_args;
1407
1408         if (strnlen(value, PATH_MAX) == PATH_MAX) {
1409                 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1410                            value, PATH_MAX);
1411                 return -EINVAL;
1412         }
1413
1414         if (access(value, F_OK) != 0) {
1415                 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1416                            value, strerror(errno));
1417                 return -EINVAL;
1418         }
1419
1420         strlcpy(path, value, PATH_MAX);
1421
1422         return 0;
1423 }
1424
1425 static int
1426 xdp_get_channels_info(const char *if_name, int *max_queues,
1427                                 int *combined_queues)
1428 {
1429         struct ethtool_channels channels;
1430         struct ifreq ifr;
1431         int fd, ret;
1432
1433         fd = socket(AF_INET, SOCK_DGRAM, 0);
1434         if (fd < 0)
1435                 return -1;
1436
1437         channels.cmd = ETHTOOL_GCHANNELS;
1438         ifr.ifr_data = (void *)&channels;
1439         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1440         ret = ioctl(fd, SIOCETHTOOL, &ifr);
1441         if (ret) {
1442                 if (errno == EOPNOTSUPP) {
1443                         ret = 0;
1444                 } else {
1445                         ret = -errno;
1446                         goto out;
1447                 }
1448         }
1449
1450         if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1451                 /* If the device says it has no channels, then all traffic
1452                  * is sent to a single stream, so max queues = 1.
1453                  */
1454                 *max_queues = 1;
1455                 *combined_queues = 1;
1456         } else {
1457                 *max_queues = channels.max_combined;
1458                 *combined_queues = channels.combined_count;
1459         }
1460
1461  out:
1462         close(fd);
1463         return ret;
1464 }
1465
1466 static int
1467 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1468                         int *queue_cnt, int *shared_umem, char *prog_path)
1469 {
1470         int ret;
1471
1472         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1473                                  &parse_name_arg, if_name);
1474         if (ret < 0)
1475                 goto free_kvlist;
1476
1477         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1478                                  &parse_integer_arg, start_queue);
1479         if (ret < 0)
1480                 goto free_kvlist;
1481
1482         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1483                                  &parse_integer_arg, queue_cnt);
1484         if (ret < 0 || *queue_cnt <= 0) {
1485                 ret = -EINVAL;
1486                 goto free_kvlist;
1487         }
1488
1489         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1490                                 &parse_integer_arg, shared_umem);
1491         if (ret < 0)
1492                 goto free_kvlist;
1493
1494         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1495                                  &parse_prog_arg, prog_path);
1496         if (ret < 0)
1497                 goto free_kvlist;
1498
1499 free_kvlist:
1500         rte_kvargs_free(kvlist);
1501         return ret;
1502 }
1503
1504 static int
1505 get_iface_info(const char *if_name,
1506                struct rte_ether_addr *eth_addr,
1507                int *if_index)
1508 {
1509         struct ifreq ifr;
1510         int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1511
1512         if (sock < 0)
1513                 return -1;
1514
1515         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1516         if (ioctl(sock, SIOCGIFINDEX, &ifr))
1517                 goto error;
1518
1519         *if_index = ifr.ifr_ifindex;
1520
1521         if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1522                 goto error;
1523
1524         rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1525
1526         close(sock);
1527         return 0;
1528
1529 error:
1530         close(sock);
1531         return -1;
1532 }
1533
1534 static struct rte_eth_dev *
1535 init_internals(struct rte_vdev_device *dev, const char *if_name,
1536                 int start_queue_idx, int queue_cnt, int shared_umem,
1537                 const char *prog_path)
1538 {
1539         const char *name = rte_vdev_device_name(dev);
1540         const unsigned int numa_node = dev->device.numa_node;
1541         struct pmd_internals *internals;
1542         struct rte_eth_dev *eth_dev;
1543         int ret;
1544         int i;
1545
1546         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1547         if (internals == NULL)
1548                 return NULL;
1549
1550         internals->start_queue_idx = start_queue_idx;
1551         internals->queue_cnt = queue_cnt;
1552         strlcpy(internals->if_name, if_name, IFNAMSIZ);
1553         strlcpy(internals->prog_path, prog_path, PATH_MAX);
1554         internals->custom_prog_configured = 0;
1555
1556 #ifndef ETH_AF_XDP_SHARED_UMEM
1557         if (shared_umem) {
1558                 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1559                                 "Check kernel and libbpf version\n");
1560                 goto err_free_internals;
1561         }
1562 #endif
1563         internals->shared_umem = shared_umem;
1564
1565         if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1566                                   &internals->combined_queue_cnt)) {
1567                 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1568                                 if_name);
1569                 goto err_free_internals;
1570         }
1571
1572         if (queue_cnt > internals->combined_queue_cnt) {
1573                 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1574                                 queue_cnt, internals->combined_queue_cnt);
1575                 goto err_free_internals;
1576         }
1577
1578         internals->rx_queues = rte_zmalloc_socket(NULL,
1579                                         sizeof(struct pkt_rx_queue) * queue_cnt,
1580                                         0, numa_node);
1581         if (internals->rx_queues == NULL) {
1582                 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1583                 goto err_free_internals;
1584         }
1585
1586         internals->tx_queues = rte_zmalloc_socket(NULL,
1587                                         sizeof(struct pkt_tx_queue) * queue_cnt,
1588                                         0, numa_node);
1589         if (internals->tx_queues == NULL) {
1590                 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1591                 goto err_free_rx;
1592         }
1593         for (i = 0; i < queue_cnt; i++) {
1594                 internals->tx_queues[i].pair = &internals->rx_queues[i];
1595                 internals->rx_queues[i].pair = &internals->tx_queues[i];
1596                 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1597                 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1598         }
1599
1600         ret = get_iface_info(if_name, &internals->eth_addr,
1601                              &internals->if_index);
1602         if (ret)
1603                 goto err_free_tx;
1604
1605         eth_dev = rte_eth_vdev_allocate(dev, 0);
1606         if (eth_dev == NULL)
1607                 goto err_free_tx;
1608
1609         eth_dev->data->dev_private = internals;
1610         eth_dev->data->dev_link = pmd_link;
1611         eth_dev->data->mac_addrs = &internals->eth_addr;
1612         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1613         eth_dev->dev_ops = &ops;
1614         eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1615         eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1616
1617 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1618         AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1619 #endif
1620
1621         return eth_dev;
1622
1623 err_free_tx:
1624         rte_free(internals->tx_queues);
1625 err_free_rx:
1626         rte_free(internals->rx_queues);
1627 err_free_internals:
1628         rte_free(internals);
1629         return NULL;
1630 }
1631
1632 static int
1633 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1634 {
1635         struct rte_kvargs *kvlist;
1636         char if_name[IFNAMSIZ] = {'\0'};
1637         int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1638         int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1639         int shared_umem = 0;
1640         char prog_path[PATH_MAX] = {'\0'};
1641         struct rte_eth_dev *eth_dev = NULL;
1642         const char *name;
1643
1644         AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
1645                 rte_vdev_device_name(dev));
1646
1647         name = rte_vdev_device_name(dev);
1648         if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1649                 strlen(rte_vdev_device_args(dev)) == 0) {
1650                 eth_dev = rte_eth_dev_attach_secondary(name);
1651                 if (eth_dev == NULL) {
1652                         AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1653                         return -EINVAL;
1654                 }
1655                 eth_dev->dev_ops = &ops;
1656                 rte_eth_dev_probing_finish(eth_dev);
1657                 return 0;
1658         }
1659
1660         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1661         if (kvlist == NULL) {
1662                 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1663                 return -EINVAL;
1664         }
1665
1666         if (dev->device.numa_node == SOCKET_ID_ANY)
1667                 dev->device.numa_node = rte_socket_id();
1668
1669         if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1670                              &xsk_queue_cnt, &shared_umem, prog_path) < 0) {
1671                 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1672                 return -EINVAL;
1673         }
1674
1675         if (strlen(if_name) == 0) {
1676                 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1677                 return -EINVAL;
1678         }
1679
1680         eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1681                                         xsk_queue_cnt, shared_umem, prog_path);
1682         if (eth_dev == NULL) {
1683                 AF_XDP_LOG(ERR, "Failed to init internals\n");
1684                 return -1;
1685         }
1686
1687         rte_eth_dev_probing_finish(eth_dev);
1688
1689         return 0;
1690 }
1691
1692 static int
1693 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
1694 {
1695         struct rte_eth_dev *eth_dev = NULL;
1696
1697         AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
1698                 rte_socket_id());
1699
1700         if (dev == NULL)
1701                 return -1;
1702
1703         /* find the ethdev entry */
1704         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1705         if (eth_dev == NULL)
1706                 return 0;
1707
1708         eth_dev_close(eth_dev);
1709         rte_eth_dev_release_port(eth_dev);
1710
1711
1712         return 0;
1713 }
1714
1715 static struct rte_vdev_driver pmd_af_xdp_drv = {
1716         .probe = rte_pmd_af_xdp_probe,
1717         .remove = rte_pmd_af_xdp_remove,
1718 };
1719
1720 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
1721 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
1722                               "iface=<string> "
1723                               "start_queue=<int> "
1724                               "queue_count=<int> "
1725                               "shared_umem=<int> "
1726                               "xdp_prog=<string> ");