drivers/net/af_xdp/rte_eth_af_xdp.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019-2020 Intel Corporation.
 */
#include <unistd.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>
#include <net/if.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_xdp.h>
#include <linux/if_link.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include "af_xdp_deps.h"
#include <bpf/bpf.h>

#include <rte_ethdev.h>
#include <ethdev_driver.h>
#include <ethdev_vdev.h>
#include <rte_kvargs.h>
#include <rte_bus_vdev.h>
#include <rte_string_fns.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <rte_spinlock.h>
#include <rte_power_intrinsics.h>

#include "compat.h"

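/*
 * Fallback definitions for the socket-option and address-family constants
 * below, for builds against kernel headers that predate them. The numeric
 * values match the upstream Linux UAPI.
 */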
#ifndef SO_PREFER_BUSY_POLL
#define SO_PREFER_BUSY_POLL 69
#endif
#ifndef SO_BUSY_POLL_BUDGET
#define SO_BUSY_POLL_BUDGET 70
#endif

#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);

#define AF_XDP_LOG(level, fmt, args...)                 \
        rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
                "%s(): " fmt, __func__, ##args)

#define ETH_AF_XDP_FRAME_SIZE           2048
#define ETH_AF_XDP_NUM_BUFFERS          4096
#define ETH_AF_XDP_DFLT_NUM_DESCS       XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
#define ETH_AF_XDP_DFLT_QUEUE_COUNT     1
#define ETH_AF_XDP_DFLT_BUSY_BUDGET     64
#define ETH_AF_XDP_DFLT_BUSY_TIMEOUT    20

#define ETH_AF_XDP_RX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
#define ETH_AF_XDP_TX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS

#define ETH_AF_XDP_ETH_OVERHEAD         (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)

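/*
 * UMEM bookkeeping. In zero-copy mode (XDP_UMEM_UNALIGNED_CHUNK_FLAG
 * available) the UMEM is overlaid on an mbuf mempool: mb_pool and buffer
 * are used while buf_ring/mz stay NULL. In copy mode the driver instead
 * reserves its own memzone (mz) and tracks free frames with buf_ring.
 */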
struct xsk_umem_info {
        struct xsk_umem *umem;
        struct rte_ring *buf_ring;
        const struct rte_memzone *mz;
        struct rte_mempool *mb_pool;
        void *buffer;
        uint8_t refcnt;
        uint32_t max_xsks;
};

struct rx_stats {
        uint64_t rx_pkts;
        uint64_t rx_bytes;
        uint64_t rx_dropped;
};

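/*
 * Per-queue Rx state. The fill (fq) and completion (cq) rings live here,
 * per socket, rather than in xsk_umem_info, so that a single UMEM can back
 * several sockets when shared_umem is enabled.
 */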
struct pkt_rx_queue {
        struct xsk_ring_cons rx;
        struct xsk_umem_info *umem;
        struct xsk_socket *xsk;
        struct rte_mempool *mb_pool;

        struct rx_stats stats;

        struct xsk_ring_prod fq;
        struct xsk_ring_cons cq;

        struct pkt_tx_queue *pair;
        struct pollfd fds[1];
        int xsk_queue_idx;
        int busy_budget;
};

struct tx_stats {
        uint64_t tx_pkts;
        uint64_t tx_bytes;
        uint64_t tx_dropped;
};

struct pkt_tx_queue {
        struct xsk_ring_prod tx;
        struct xsk_umem_info *umem;

        struct tx_stats stats;

        struct pkt_rx_queue *pair;
        int xsk_queue_idx;
};

struct pmd_internals {
        int if_index;
        char if_name[IFNAMSIZ];
        int start_queue_idx;
        int queue_cnt;
        int max_queue_cnt;
        int combined_queue_cnt;
        bool shared_umem;
        char prog_path[PATH_MAX];
        bool custom_prog_configured;
        struct bpf_map *map;

        struct rte_ether_addr eth_addr;

        struct pkt_rx_queue *rx_queues;
        struct pkt_tx_queue *tx_queues;
};

#define ETH_AF_XDP_IFACE_ARG                    "iface"
#define ETH_AF_XDP_START_QUEUE_ARG              "start_queue"
#define ETH_AF_XDP_QUEUE_COUNT_ARG              "queue_count"
#define ETH_AF_XDP_SHARED_UMEM_ARG              "shared_umem"
#define ETH_AF_XDP_PROG_ARG                     "xdp_prog"
#define ETH_AF_XDP_BUDGET_ARG                   "busy_budget"

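/*
 * These devargs are supplied on the EAL command line when the vdev is
 * created, e.g. (illustrative values):
 *   --vdev net_af_xdp,iface=ens786f1,start_queue=0,queue_count=1,busy_budget=64
 */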
static const char * const valid_arguments[] = {
        ETH_AF_XDP_IFACE_ARG,
        ETH_AF_XDP_START_QUEUE_ARG,
        ETH_AF_XDP_QUEUE_COUNT_ARG,
        ETH_AF_XDP_SHARED_UMEM_ARG,
        ETH_AF_XDP_PROG_ARG,
        ETH_AF_XDP_BUDGET_ARG,
        NULL
};

static const struct rte_eth_link pmd_link = {
        .link_speed = RTE_ETH_SPEED_NUM_10G,
        .link_duplex = RTE_ETH_LINK_FULL_DUPLEX,
        .link_status = RTE_ETH_LINK_DOWN,
        .link_autoneg = RTE_ETH_LINK_AUTONEG
};

/* List which tracks PMDs to facilitate sharing UMEMs across them. */
struct internal_list {
        TAILQ_ENTRY(internal_list) next;
        struct rte_eth_dev *eth_dev;
};

TAILQ_HEAD(internal_list_head, internal_list);
static struct internal_list_head internal_list =
        TAILQ_HEAD_INITIALIZER(internal_list);

static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
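/*
 * Zero-copy fill: translate each mbuf pointer into a UMEM-relative address
 * (the mempool object header precedes the mbuf, hence the header_size
 * adjustment) and post it to the fill ring so the kernel receives directly
 * into mempool memory.
 */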
static inline int
reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
                      struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
{
        uint32_t idx;
        uint16_t i;

        if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
                for (i = 0; i < reserve_size; i++)
                        rte_pktmbuf_free(bufs[i]);
                AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
                return -1;
        }

        for (i = 0; i < reserve_size; i++) {
                __u64 *fq_addr;
                uint64_t addr;

                fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
                addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
                                umem->mb_pool->header_size;
                *fq_addr = addr;
        }

        xsk_ring_prod__submit(fq, reserve_size);

        return 0;
}
#else
static inline int
reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
                      struct rte_mbuf **bufs __rte_unused,
                      struct xsk_ring_prod *fq)
{
        void *addrs[reserve_size];
        uint32_t idx;
        uint16_t i;

        if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
                    != reserve_size) {
                AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
                return -1;
        }

        if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
                AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
                rte_ring_enqueue_bulk(umem->buf_ring, addrs,
                                reserve_size, NULL);
                return -1;
        }

        for (i = 0; i < reserve_size; i++) {
                __u64 *fq_addr;

                fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
                *fq_addr = (uint64_t)addrs[i];
        }

        xsk_ring_prod__submit(fq, reserve_size);

        return 0;
}
#endif

static inline int
reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
                   struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
#else
        return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
#endif
}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
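/*
 * Zero-copy Rx: each completed descriptor carries a UMEM address plus an
 * offset into the frame. The mbuf that owns the frame is recovered by
 * pointing back into the mempool, so packet data is never copied.
 */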
static uint16_t
af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct pkt_rx_queue *rxq = queue;
        struct xsk_ring_cons *rx = &rxq->rx;
        struct xsk_ring_prod *fq = &rxq->fq;
        struct xsk_umem_info *umem = rxq->umem;
        uint32_t idx_rx = 0;
        unsigned long rx_bytes = 0;
        int i;
        struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];

        nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);

        if (nb_pkts == 0) {
                /* we can assume a kernel >= 5.11 is in use if busy polling is
                 * enabled and thus we can safely use the recvfrom() syscall
                 * which is only supported for AF_XDP sockets in kernels >=
                 * 5.11.
                 */
                if (rxq->busy_budget) {
                        (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
                                       MSG_DONTWAIT, NULL, NULL);
                } else if (xsk_ring_prod__needs_wakeup(fq)) {
                        (void)poll(&rxq->fds[0], 1, 1000);
                }

                return 0;
        }

        /* allocate bufs for fill queue replenishment after rx */
        if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
                AF_XDP_LOG(DEBUG,
                        "Failed to get enough buffers for fq.\n");
                /* roll back cached_cons, which xsk_ring_cons__peek
                 * advanced
                 */
                rx->cached_cons -= nb_pkts;
                return 0;
        }

        for (i = 0; i < nb_pkts; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
                uint32_t len;
                uint64_t offset;

                desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
                addr = desc->addr;
                len = desc->len;

                offset = xsk_umem__extract_offset(addr);
                addr = xsk_umem__extract_addr(addr);

                bufs[i] = (struct rte_mbuf *)
                                xsk_umem__get_data(umem->buffer, addr +
                                        umem->mb_pool->header_size);
                bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
                        rte_pktmbuf_priv_size(umem->mb_pool) -
                        umem->mb_pool->header_size;

                rte_pktmbuf_pkt_len(bufs[i]) = len;
                rte_pktmbuf_data_len(bufs[i]) = len;
                rx_bytes += len;
        }

        xsk_ring_cons__release(rx, nb_pkts);
        (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);

        /* statistics */
        rxq->stats.rx_pkts += nb_pkts;
        rxq->stats.rx_bytes += rx_bytes;

        return nb_pkts;
}
#else
static uint16_t
af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct pkt_rx_queue *rxq = queue;
        struct xsk_ring_cons *rx = &rxq->rx;
        struct xsk_umem_info *umem = rxq->umem;
        struct xsk_ring_prod *fq = &rxq->fq;
        uint32_t idx_rx = 0;
        unsigned long rx_bytes = 0;
        int i;
        uint32_t free_thresh = fq->size >> 1;
        struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];

        if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
                (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);

        nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
        if (nb_pkts == 0) {
#if defined(XDP_USE_NEED_WAKEUP)
                if (xsk_ring_prod__needs_wakeup(fq))
                        (void)poll(rxq->fds, 1, 1000);
#endif
                return 0;
        }

        if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
                /* roll back cached_cons, which xsk_ring_cons__peek
                 * advanced
                 */
                rx->cached_cons -= nb_pkts;
                return 0;
        }

        for (i = 0; i < nb_pkts; i++) {
                const struct xdp_desc *desc;
                uint64_t addr;
                uint32_t len;
                void *pkt;

                desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
                addr = desc->addr;
                len = desc->len;
                pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);

                rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
                rte_ring_enqueue(umem->buf_ring, (void *)addr);
                rte_pktmbuf_pkt_len(mbufs[i]) = len;
                rte_pktmbuf_data_len(mbufs[i]) = len;
                rx_bytes += len;
                bufs[i] = mbufs[i];
        }

        xsk_ring_cons__release(rx, nb_pkts);

        /* statistics */
        rxq->stats.rx_pkts += nb_pkts;
        rxq->stats.rx_bytes += rx_bytes;

        return nb_pkts;
}
#endif

static uint16_t
af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        return af_xdp_rx_zc(queue, bufs, nb_pkts);
#else
        return af_xdp_rx_cp(queue, bufs, nb_pkts);
#endif
}

static uint16_t
eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        uint16_t nb_rx;

        if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
                return af_xdp_rx(queue, bufs, nb_pkts);

        /* Split larger batch into smaller batches of size
         * ETH_AF_XDP_RX_BATCH_SIZE or less.
         */
        nb_rx = 0;
        while (nb_pkts) {
                uint16_t ret, n;

                n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
                ret = af_xdp_rx(queue, &bufs[nb_rx], n);
                nb_rx = (uint16_t)(nb_rx + ret);
                nb_pkts = (uint16_t)(nb_pkts - ret);
                if (ret < n)
                        break;
        }

        return nb_rx;
}

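/*
 * Drain the completion ring. In zero-copy mode each completed address maps
 * back to an mbuf that is returned to its mempool; in copy mode the raw
 * frame address is pushed back onto buf_ring for reuse.
 */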
static void
pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
{
        size_t i, n;
        uint32_t idx_cq = 0;

        n = xsk_ring_cons__peek(cq, size, &idx_cq);

        for (i = 0; i < n; i++) {
                uint64_t addr;
                addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
                addr = xsk_umem__extract_addr(addr);
                rte_pktmbuf_free((struct rte_mbuf *)
                                        xsk_umem__get_data(umem->buffer,
                                        addr + umem->mb_pool->header_size));
#else
                rte_ring_enqueue(umem->buf_ring, (void *)addr);
#endif
        }

        xsk_ring_cons__release(cq, n);
}

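/*
 * Kick the kernel Tx path if it requested a wakeup. EBUSY, EAGAIN and
 * EINTR are treated as transient; on EAGAIN the completion ring is drained
 * to free descriptors before retrying the send.
 */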
static void
kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
{
        struct xsk_umem_info *umem = txq->umem;

        pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);

        if (tx_syscall_needed(&txq->tx))
                while (send(xsk_socket__fd(txq->pair->xsk), NULL,
                            0, MSG_DONTWAIT) < 0) {
                        /* something unexpected happened */
                        if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
                                break;

                        /* pull from completion queue to leave more space */
                        if (errno == EAGAIN)
                                pull_umem_cq(umem,
                                             XSK_RING_CONS__DEFAULT_NUM_DESCS,
                                             cq);
                }
}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
static uint16_t
af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct pkt_tx_queue *txq = queue;
        struct xsk_umem_info *umem = txq->umem;
        struct rte_mbuf *mbuf;
        unsigned long tx_bytes = 0;
        int i;
        uint32_t idx_tx;
        uint16_t count = 0;
        struct xdp_desc *desc;
        uint64_t addr, offset;
        struct xsk_ring_cons *cq = &txq->pair->cq;
        uint32_t free_thresh = cq->size >> 1;

        if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
                pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);

        for (i = 0; i < nb_pkts; i++) {
                mbuf = bufs[i];

                if (mbuf->pool == umem->mb_pool) {
                        if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
                                kick_tx(txq, cq);
                                if (!xsk_ring_prod__reserve(&txq->tx, 1,
                                                            &idx_tx))
                                        goto out;
                        }
                        desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
                        desc->len = mbuf->pkt_len;
                        addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
                                        umem->mb_pool->header_size;
                        offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
                                        (uint64_t)mbuf +
                                        umem->mb_pool->header_size;
                        offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
                        desc->addr = addr | offset;
                        tx_bytes += mbuf->pkt_len;
                        count++;
                } else {
                        struct rte_mbuf *local_mbuf =
                                        rte_pktmbuf_alloc(umem->mb_pool);
                        void *pkt;

                        if (local_mbuf == NULL)
                                goto out;

                        if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
                                rte_pktmbuf_free(local_mbuf);
                                goto out;
                        }

                        desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
                        desc->len = mbuf->pkt_len;

                        addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
                                        umem->mb_pool->header_size;
                        offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
                                        (uint64_t)local_mbuf +
                                        umem->mb_pool->header_size;
                        pkt = xsk_umem__get_data(umem->buffer, addr + offset);
                        offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
                        desc->addr = addr | offset;
                        rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
                                        desc->len);
                        /* account the bytes before the mbuf is freed */
                        tx_bytes += mbuf->pkt_len;
                        rte_pktmbuf_free(mbuf);
                        count++;
                }
        }

out:
        xsk_ring_prod__submit(&txq->tx, count);
        kick_tx(txq, cq);

        txq->stats.tx_pkts += count;
        txq->stats.tx_bytes += tx_bytes;
        txq->stats.tx_dropped += nb_pkts - count;

        return count;
}
#else
static uint16_t
af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        struct pkt_tx_queue *txq = queue;
        struct xsk_umem_info *umem = txq->umem;
        struct rte_mbuf *mbuf;
        void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
        unsigned long tx_bytes = 0;
        int i;
        uint32_t idx_tx;
        struct xsk_ring_cons *cq = &txq->pair->cq;

        pull_umem_cq(umem, nb_pkts, cq);

        nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
                                        nb_pkts, NULL);
        if (nb_pkts == 0)
                return 0;

        if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
                kick_tx(txq, cq);
                rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
                return 0;
        }

        for (i = 0; i < nb_pkts; i++) {
                struct xdp_desc *desc;
                void *pkt;

                desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
                mbuf = bufs[i];
                desc->len = mbuf->pkt_len;

                desc->addr = (uint64_t)addrs[i];
                pkt = xsk_umem__get_data(umem->mz->addr,
                                         desc->addr);
                rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
                tx_bytes += mbuf->pkt_len;
                rte_pktmbuf_free(mbuf);
        }

        xsk_ring_prod__submit(&txq->tx, nb_pkts);

        kick_tx(txq, cq);

        txq->stats.tx_pkts += nb_pkts;
        txq->stats.tx_bytes += tx_bytes;

        return nb_pkts;
}

static uint16_t
af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        uint16_t nb_tx;

        if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
                return af_xdp_tx_cp(queue, bufs, nb_pkts);

        nb_tx = 0;
        while (nb_pkts) {
                uint16_t ret, n;

                /* Split larger batch into smaller batches of size
                 * ETH_AF_XDP_TX_BATCH_SIZE or less.
                 */
                n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
                ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
                nb_tx = (uint16_t)(nb_tx + ret);
                nb_pkts = (uint16_t)(nb_pkts - ret);
                if (ret < n)
                        break;
        }

        return nb_tx;
}
#endif

static uint16_t
eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        return af_xdp_tx_zc(queue, bufs, nb_pkts);
#else
        return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
#endif
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = RTE_ETH_LINK_UP;

        return 0;
}

/* This function gets called when the current port gets stopped. */
static int
eth_dev_stop(struct rte_eth_dev *dev)
{
        dev->data->dev_link.link_status = RTE_ETH_LINK_DOWN;
        return 0;
}

/* Find ethdev in list */
static inline struct internal_list *
find_internal_resource(struct pmd_internals *port_int)
{
        int found = 0;
        struct internal_list *list = NULL;

        if (port_int == NULL)
                return NULL;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                struct pmd_internals *list_int =
                                list->eth_dev->data->dev_private;
                if (list_int == port_int) {
                        found = 1;
                        break;
                }
        }

        pthread_mutex_unlock(&internal_list_lock);

        if (!found)
                return NULL;

        return list;
}

static int
eth_dev_configure(struct rte_eth_dev *dev)
{
        struct pmd_internals *internal = dev->data->dev_private;

        /* rx/tx must be paired */
        if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
                return -EINVAL;

        if (internal->shared_umem) {
                struct internal_list *list = NULL;
                const char *name = dev->device->name;

                /* Ensure PMD is not already inserted into the list */
                list = find_internal_resource(internal);
                if (list)
                        return 0;

                list = rte_zmalloc_socket(name, sizeof(*list), 0,
                                        dev->device->numa_node);
                if (list == NULL)
                        return -1;

                list->eth_dev = dev;
                pthread_mutex_lock(&internal_list_lock);
                TAILQ_INSERT_TAIL(&internal_list, list, next);
                pthread_mutex_unlock(&internal_list_lock);
        }

        return 0;
}

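/*
 * Power-management hook: rte_power_intrinsics lets a core sleep on the Rx
 * ring's 32-bit producer index and wake when the kernel advances it. The
 * callback below compares only the low 32 bits of the monitored word
 * against the value cached when the monitor was armed.
 */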
#define CLB_VAL_IDX 0
static int
eth_monitor_callback(const uint64_t value,
                const uint64_t opaque[RTE_POWER_MONITOR_OPAQUE_SZ])
{
        const uint64_t v = opaque[CLB_VAL_IDX];
        const uint64_t m = (uint32_t)~0;

        /* if the value has changed, abort entering power optimized state */
        return (value & m) == v ? 0 : -1;
}

static int
eth_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
{
        struct pkt_rx_queue *rxq = rx_queue;
        unsigned int *prod = rxq->rx.producer;
        const uint32_t cur_val = rxq->rx.cached_prod; /* use cached value */

        /* watch for changes in producer ring */
        pmc->addr = (void *)prod;

        /* store current value */
        pmc->opaque[CLB_VAL_IDX] = cur_val;
        pmc->fn = eth_monitor_callback;

        /* AF_XDP producer ring index is 32-bit */
        pmc->size = sizeof(uint32_t);

        return 0;
}

static int
eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;

        dev_info->if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_queues = internals->queue_cnt;
        dev_info->max_tx_queues = internals->queue_cnt;

        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        dev_info->max_rx_pktlen = getpagesize() -
                                  sizeof(struct rte_mempool_objhdr) -
                                  sizeof(struct rte_mbuf) -
                                  RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
#else
        dev_info->max_rx_pktlen = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
#endif
        dev_info->max_mtu = dev_info->max_rx_pktlen - ETH_AF_XDP_ETH_OVERHEAD;

        dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
        dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
        dev_info->default_rxportconf.nb_queues = 1;
        dev_info->default_txportconf.nb_queues = 1;
        dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
        dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;

        return 0;
}

static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct pmd_internals *internals = dev->data->dev_private;
        struct xdp_statistics xdp_stats;
        struct pkt_rx_queue *rxq;
        struct pkt_tx_queue *txq;
        socklen_t optlen;
        int i, ret;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                optlen = sizeof(struct xdp_statistics);
                rxq = &internals->rx_queues[i];
                txq = rxq->pair;
                stats->q_ipackets[i] = rxq->stats.rx_pkts;
                stats->q_ibytes[i] = rxq->stats.rx_bytes;

                stats->q_opackets[i] = txq->stats.tx_pkts;
                stats->q_obytes[i] = txq->stats.tx_bytes;

                stats->ipackets += stats->q_ipackets[i];
                stats->ibytes += stats->q_ibytes[i];
                stats->imissed += rxq->stats.rx_dropped;
                stats->oerrors += txq->stats.tx_dropped;
                ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
                                XDP_STATISTICS, &xdp_stats, &optlen);
                if (ret != 0) {
                        AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
                        return -1;
                }
                stats->imissed += xdp_stats.rx_dropped;

                stats->opackets += stats->q_opackets[i];
                stats->obytes += stats->q_obytes[i];
        }

        return 0;
}

static int
eth_stats_reset(struct rte_eth_dev *dev)
{
        struct pmd_internals *internals = dev->data->dev_private;
        int i;

        for (i = 0; i < internals->queue_cnt; i++) {
                memset(&internals->rx_queues[i].stats, 0,
                                        sizeof(struct rx_stats));
                memset(&internals->tx_queues[i].stats, 0,
                                        sizeof(struct tx_stats));
        }

        return 0;
}

static void
remove_xdp_program(struct pmd_internals *internals)
{
        uint32_t curr_prog_id = 0;

        if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
                                XDP_FLAGS_UPDATE_IF_NOEXIST)) {
                AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
                return;
        }
        bpf_set_link_xdp_fd(internals->if_index, -1,
                        XDP_FLAGS_UPDATE_IF_NOEXIST);
}

static void
xdp_umem_destroy(struct xsk_umem_info *umem)
{
#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
        umem->mb_pool = NULL;
#else
        rte_memzone_free(umem->mz);
        umem->mz = NULL;

        rte_ring_free(umem->buf_ring);
        umem->buf_ring = NULL;
#endif

        rte_free(umem);
}

static int
eth_dev_close(struct rte_eth_dev *dev)
{
        struct pmd_internals *internals = dev->data->dev_private;
        struct pkt_rx_queue *rxq;
        int i;

        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
                rte_socket_id());

        for (i = 0; i < internals->queue_cnt; i++) {
                rxq = &internals->rx_queues[i];
                if (rxq->umem == NULL)
                        break;
                xsk_socket__delete(rxq->xsk);

                if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
                                == 0) {
                        (void)xsk_umem__delete(rxq->umem->umem);
                        xdp_umem_destroy(rxq->umem);
                }

                /* free pkt_tx_queue */
                rte_free(rxq->pair);
                rte_free(rxq);
        }

        /*
         * The MAC address is not allocated dynamically, so set the pointer
         * to NULL to prevent rte_eth_dev_release_port() from trying to
         * free it.
         */
        dev->data->mac_addrs = NULL;

        remove_xdp_program(internals);

        if (internals->shared_umem) {
                struct internal_list *list;

                /* Remove ethdev from list used to track and share UMEMs */
                list = find_internal_resource(internals);
                if (list) {
                        pthread_mutex_lock(&internal_list_lock);
                        TAILQ_REMOVE(&internal_list, list, next);
                        pthread_mutex_unlock(&internal_list_lock);
                        rte_free(list);
                }
        }

        return 0;
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
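/*
 * The UMEM base must be page aligned: round the first memhdr address down
 * to a page boundary and report the slack via *align so the UMEM size can
 * be padded accordingly.
 */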
static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
{
        struct rte_mempool_memhdr *memhdr;
        uintptr_t memhdr_addr, aligned_addr;

        memhdr = STAILQ_FIRST(&mp->mem_list);
        memhdr_addr = (uintptr_t)memhdr->addr;
        aligned_addr = memhdr_addr & ~(getpagesize() - 1);
        *align = memhdr_addr - aligned_addr;

        return aligned_addr;
}

/* Check if the netdev,qid context already exists */
static inline bool
ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
                struct pkt_rx_queue *list_rxq, const char *list_ifname)
{
        bool exists = false;

        if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
                        !strncmp(ifname, list_ifname, IFNAMSIZ)) {
                AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
                                        ifname, rxq->xsk_queue_idx);
                exists = true;
        }

        return exists;
}

/* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
static inline int
get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
                        struct xsk_umem_info **umem)
{
        struct internal_list *list;
        struct pmd_internals *internals;
        int i = 0, ret = 0;
        struct rte_mempool *mb_pool = rxq->mb_pool;

        if (mb_pool == NULL)
                return ret;

        pthread_mutex_lock(&internal_list_lock);

        TAILQ_FOREACH(list, &internal_list, next) {
                internals = list->eth_dev->data->dev_private;
                for (i = 0; i < internals->queue_cnt; i++) {
                        struct pkt_rx_queue *list_rxq =
                                                &internals->rx_queues[i];
                        if (rxq == list_rxq)
                                continue;
                        if (mb_pool == internals->rx_queues[i].mb_pool) {
                                if (ctx_exists(rxq, ifname, list_rxq,
                                                internals->if_name)) {
                                        ret = -1;
                                        goto out;
                                }
                                if (__atomic_load_n(&internals->rx_queues[i].umem->refcnt,
                                                    __ATOMIC_ACQUIRE)) {
                                        *umem = internals->rx_queues[i].umem;
                                        goto out;
                                }
                        }
                }
        }

out:
        pthread_mutex_unlock(&internal_list_lock);

        return ret;
}

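/*
 * Zero-copy UMEM setup: the UMEM is overlaid directly on the mbuf mempool.
 * frame_size is the full mempool object size and frame_headroom skips the
 * object header, the mbuf structure, its private area and the mbuf
 * headroom, so UMEM frame addresses line up with packet data.
 */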
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
                                  struct pkt_rx_queue *rxq)
{
        struct xsk_umem_info *umem = NULL;
        int ret;
        struct xsk_umem_config usr_config = {
                .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
                .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
                .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
        void *base_addr = NULL;
        struct rte_mempool *mb_pool = rxq->mb_pool;
        uint64_t umem_size, align = 0;

        if (internals->shared_umem) {
                if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
                        return NULL;

                if (umem != NULL &&
                        __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
                                        umem->max_xsks) {
                        AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
                                        internals->if_name, rxq->xsk_queue_idx);
                        __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
                }
        }

        if (umem == NULL) {
                usr_config.frame_size =
                        rte_mempool_calc_obj_size(mb_pool->elt_size,
                                                  mb_pool->flags, NULL);
                usr_config.frame_headroom = mb_pool->header_size +
                                                sizeof(struct rte_mbuf) +
                                                rte_pktmbuf_priv_size(mb_pool) +
                                                RTE_PKTMBUF_HEADROOM;

                umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
                                          rte_socket_id());
                if (umem == NULL) {
                        AF_XDP_LOG(ERR, "Failed to allocate umem info");
                        return NULL;
                }

                umem->mb_pool = mb_pool;
                base_addr = (void *)get_base_addr(mb_pool, &align);
                umem_size = (uint64_t)mb_pool->populated_size *
                                (uint64_t)usr_config.frame_size +
                                align;

                ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
                                &rxq->fq, &rxq->cq, &usr_config);
                if (ret) {
                        AF_XDP_LOG(ERR, "Failed to create umem");
                        goto err;
                }
                umem->buffer = base_addr;

                if (internals->shared_umem) {
                        umem->max_xsks = mb_pool->populated_size /
                                                ETH_AF_XDP_NUM_BUFFERS;
                        AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
                                                mb_pool->name, umem->max_xsks);
                }

                __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
        }

#else
static struct
xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
                                  struct pkt_rx_queue *rxq)
{
        struct xsk_umem_info *umem;
        const struct rte_memzone *mz;
        struct xsk_umem_config usr_config = {
                .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
                .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
                .frame_size = ETH_AF_XDP_FRAME_SIZE,
                .frame_headroom = 0 };
        char ring_name[RTE_RING_NAMESIZE];
        char mz_name[RTE_MEMZONE_NAMESIZE];
        int ret;
        uint64_t i;

        umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
        if (umem == NULL) {
                AF_XDP_LOG(ERR, "Failed to allocate umem info");
                return NULL;
        }

        snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
                       internals->if_name, rxq->xsk_queue_idx);
        umem->buf_ring = rte_ring_create(ring_name,
                                         ETH_AF_XDP_NUM_BUFFERS,
                                         rte_socket_id(),
                                         0x0);
        if (umem->buf_ring == NULL) {
                AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
                goto err;
        }

        for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
                rte_ring_enqueue(umem->buf_ring,
                                 (void *)(i * ETH_AF_XDP_FRAME_SIZE));

        snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
                       internals->if_name, rxq->xsk_queue_idx);
        mz = rte_memzone_reserve_aligned(mz_name,
                        ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
                        rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
                        getpagesize());
        if (mz == NULL) {
                AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
                goto err;
        }

        ret = xsk_umem__create(&umem->umem, mz->addr,
                               ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
                               &rxq->fq, &rxq->cq,
                               &usr_config);

        if (ret) {
                AF_XDP_LOG(ERR, "Failed to create umem");
                goto err;
        }
        umem->mz = mz;

#endif
        return umem;

err:
        xdp_umem_destroy(umem);
        return NULL;
}

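/*
 * The custom XDP program must contain a BPF map named "xsks_map" of type
 * BPF_MAP_TYPE_XSKMAP for redirecting traffic to the sockets. In legacy
 * libbpf syntax this would look something like (illustrative):
 *
 *   struct bpf_map_def SEC("maps") xsks_map = {
 *           .type = BPF_MAP_TYPE_XSKMAP,
 *           .key_size = sizeof(int),
 *           .value_size = sizeof(int),
 *           .max_entries = 64,
 *   };
 */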
1149 static int
1150 load_custom_xdp_prog(const char *prog_path, int if_index, struct bpf_map **map)
1151 {
1152         int ret, prog_fd = -1;
1153         struct bpf_object *obj;
1154
1155         ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
1156         if (ret) {
1157                 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1158                 return ret;
1159         }
1160
1161         /*
1162          * The loaded program must provision for a map of xsks, such that some
1163          * traffic can be redirected to userspace.
1164          */
1165         *map = bpf_object__find_map_by_name(obj, "xsks_map");
1166         if (!*map) {
1167                 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1168                 return -1;
1169         }
1170
1171         /* Link the program with the given network device */
1172         ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1173                                         XDP_FLAGS_UPDATE_IF_NOEXIST);
1174         if (ret) {
1175                 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1176                                 prog_fd);
1177                 return -1;
1178         }
1179
1180         AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1181                                 prog_path, prog_fd);
1182
1183         return 0;
1184 }
1185
1186 /* Detect support for busy polling through setsockopt(). */
1187 static int
1188 configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
1189 {
1190         int sock_opt = 1;
1191         int fd = xsk_socket__fd(rxq->xsk);
1192         int ret = 0;
1193
1194         ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1195                         (void *)&sock_opt, sizeof(sock_opt));
1196         if (ret < 0) {
1197                 AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
1198                 goto err_prefer;
1199         }
1200
1201         sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
1202         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1203                         sizeof(sock_opt));
1204         if (ret < 0) {
1205                 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
1206                 goto err_timeout;
1207         }
1208
1209         sock_opt = rxq->busy_budget;
1210         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
1211                         (void *)&sock_opt, sizeof(sock_opt));
1212         if (ret < 0) {
1213                 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
1214         } else {
1215                 AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
1216                                         rxq->busy_budget);
1217                 return 0;
1218         }
1219
1220         /* setsockopt failure - attempt to restore xsk to default state and
1221          * proceed without busy polling support.
1222          */
1223         sock_opt = 0;
1224         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1225                         sizeof(sock_opt));
1226         if (ret < 0) {
1227                 AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
1228                 return -1;
1229         }
1230
1231 err_timeout:
1232         sock_opt = 0;
1233         ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1234                         (void *)&sock_opt, sizeof(sock_opt));
1235         if (ret < 0) {
1236                 AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
1237                 return -1;
1238         }
1239
1240 err_prefer:
1241         rxq->busy_budget = 0;
1242         return 0;
1243 }
1244
1245 static int
1246 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1247               int ring_size)
1248 {
1249         struct xsk_socket_config cfg;
1250         struct pkt_tx_queue *txq = rxq->pair;
1251         int ret = 0;
1252         int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1253         struct rte_mbuf *fq_bufs[reserve_size];
1254
1255         rxq->umem = xdp_umem_configure(internals, rxq);
1256         if (rxq->umem == NULL)
1257                 return -ENOMEM;
1258         txq->umem = rxq->umem;
1259
1260         cfg.rx_size = ring_size;
1261         cfg.tx_size = ring_size;
1262         cfg.libbpf_flags = 0;
1263         cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1264         cfg.bind_flags = 0;
1265
1266 #if defined(XDP_USE_NEED_WAKEUP)
1267         cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1268 #endif
1269
1270         if (strnlen(internals->prog_path, PATH_MAX) &&
1271                                 !internals->custom_prog_configured) {
1272                 ret = load_custom_xdp_prog(internals->prog_path,
1273                                            internals->if_index,
1274                                            &internals->map);
1275                 if (ret) {
1276                         AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1277                                         internals->prog_path);
1278                         goto err;
1279                 }
1280                 internals->custom_prog_configured = 1;
1281                 cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;
1282         }
1283
1284         if (internals->shared_umem)
1285                 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1286                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1287                                 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1288         else
1289                 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1290                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1291                                 &txq->tx, &cfg);
1292
1293         if (ret) {
1294                 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1295                 goto err;
1296         }
1297
1298         /* insert the xsk into the xsks_map */
1299         if (internals->custom_prog_configured) {
1300                 int err, fd;
1301
1302                 fd = xsk_socket__fd(rxq->xsk);
1303                 err = bpf_map_update_elem(bpf_map__fd(internals->map),
1304                                           &rxq->xsk_queue_idx, &fd, 0);
1305                 if (err) {
1306                         AF_XDP_LOG(ERR, "Failed to insert xsk in map.\n");
1307                         goto err;
1308                 }
1309         }
1310
1311 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1312         ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1313         if (ret) {
1314                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1315                 goto err;
1316         }
1317 #endif
1318
1319         if (rxq->busy_budget) {
1320                 ret = configure_preferred_busy_poll(rxq);
1321                 if (ret) {
1322                         AF_XDP_LOG(ERR, "Failed configure busy polling.\n");
1323                         goto err;
1324                 }
1325         }
1326
1327         ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1328         if (ret) {
1329                 xsk_socket__delete(rxq->xsk);
1330                 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1331                 goto err;
1332         }
1333
1334         return 0;
1335
1336 err:
1337         if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1338                 xdp_umem_destroy(rxq->umem);
1339
1340         return ret;
1341 }
1342
1343 static int
1344 eth_rx_queue_setup(struct rte_eth_dev *dev,
1345                    uint16_t rx_queue_id,
1346                    uint16_t nb_rx_desc,
1347                    unsigned int socket_id __rte_unused,
1348                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1349                    struct rte_mempool *mb_pool)
1350 {
1351         struct pmd_internals *internals = dev->data->dev_private;
1352         struct pkt_rx_queue *rxq;
1353         int ret;
1354
1355         rxq = &internals->rx_queues[rx_queue_id];
1356
1357         AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1358                    rx_queue_id, rxq->xsk_queue_idx);
1359
1360 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1361         uint32_t buf_size, data_size;
1362
1363         /* Now get the space available for data in the mbuf */
1364         buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1365                 RTE_PKTMBUF_HEADROOM;
1366         data_size = ETH_AF_XDP_FRAME_SIZE;
1367
1368         if (data_size > buf_size) {
1369                 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1370                         dev->device->name, data_size, buf_size);
1371                 ret = -ENOMEM;
1372                 goto err;
1373         }
1374 #endif
1375
1376         rxq->mb_pool = mb_pool;
1377
1378         if (xsk_configure(internals, rxq, nb_rx_desc)) {
1379                 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1380                 ret = -EINVAL;
1381                 goto err;
1382         }
1383
1384         if (!rxq->busy_budget)
1385                 AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
1386
1387         rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1388         rxq->fds[0].events = POLLIN;
1389
1390         dev->data->rx_queues[rx_queue_id] = rxq;
1391         return 0;
1392
1393 err:
1394         return ret;
1395 }
1396
1397 static int
1398 eth_tx_queue_setup(struct rte_eth_dev *dev,
1399                    uint16_t tx_queue_id,
1400                    uint16_t nb_tx_desc __rte_unused,
1401                    unsigned int socket_id __rte_unused,
1402                    const struct rte_eth_txconf *tx_conf __rte_unused)
1403 {
1404         struct pmd_internals *internals = dev->data->dev_private;
1405         struct pkt_tx_queue *txq;
1406
1407         txq = &internals->tx_queues[tx_queue_id];
1408
1409         dev->data->tx_queues[tx_queue_id] = txq;
1410         return 0;
1411 }
1412
1413 static int
1414 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1415 {
1416         struct pmd_internals *internals = dev->data->dev_private;
1417         struct ifreq ifr = { .ifr_mtu = mtu };
1418         int ret;
1419         int s;
1420
1421         s = socket(PF_INET, SOCK_DGRAM, 0);
1422         if (s < 0)
1423                 return -EINVAL;
1424
1425         strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1426         ret = ioctl(s, SIOCSIFMTU, &ifr);
1427         close(s);
1428
1429         return (ret < 0) ? -errno : 0;
1430 }
1431
1432 static int
1433 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1434 {
1435         struct ifreq ifr;
1436         int ret = 0;
1437         int s;
1438
1439         s = socket(PF_INET, SOCK_DGRAM, 0);
1440         if (s < 0)
1441                 return -errno;
1442
1443         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1444         if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1445                 ret = -errno;
1446                 goto out;
1447         }
1448         ifr.ifr_flags &= mask;
1449         ifr.ifr_flags |= flags;
1450         if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1451                 ret = -errno;
1452                 goto out;
1453         }
1454 out:
1455         close(s);
1456         return ret;
1457 }
1458
1459 static int
1460 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1461 {
1462         struct pmd_internals *internals = dev->data->dev_private;
1463
1464         return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1465 }
1466
1467 static int
1468 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1469 {
1470         struct pmd_internals *internals = dev->data->dev_private;
1471
1472         return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1473 }
1474
1475 static const struct eth_dev_ops ops = {
1476         .dev_start = eth_dev_start,
1477         .dev_stop = eth_dev_stop,
1478         .dev_close = eth_dev_close,
1479         .dev_configure = eth_dev_configure,
1480         .dev_infos_get = eth_dev_info,
1481         .mtu_set = eth_dev_mtu_set,
1482         .promiscuous_enable = eth_dev_promiscuous_enable,
1483         .promiscuous_disable = eth_dev_promiscuous_disable,
1484         .rx_queue_setup = eth_rx_queue_setup,
1485         .tx_queue_setup = eth_tx_queue_setup,
1486         .link_update = eth_link_update,
1487         .stats_get = eth_stats_get,
1488         .stats_reset = eth_stats_reset,
1489         .get_monitor_addr = eth_get_monitor_addr,
1490 };
1491
1492 /** parse busy_budget argument */
1493 static int
1494 parse_budget_arg(const char *key __rte_unused,
1495                   const char *value, void *extra_args)
1496 {
1497         int *i = (int *)extra_args;
1498         char *end;
1499
1500         *i = strtol(value, &end, 10);
1501         if (*i < 0 || *i > UINT16_MAX) {
1502                 AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
1503                                 *i, UINT16_MAX);
1504                 return -EINVAL;
1505         }
1506
1507         return 0;
1508 }

/** parse integer argument */
static int
parse_integer_arg(const char *key __rte_unused,
		  const char *value, void *extra_args)
{
	int *i = (int *)extra_args;
	char *end;

	*i = strtol(value, &end, 10);
	if (*i < 0) {
		AF_XDP_LOG(ERR, "Argument has to be non-negative.\n");
		return -EINVAL;
	}

	return 0;
}

/** parse name argument */
static int
parse_name_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *name = extra_args;

	if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
		AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
			   value, IFNAMSIZ);
		return -EINVAL;
	}

	strlcpy(name, value, IFNAMSIZ);

	return 0;
}

/** parse xdp prog argument */
static int
parse_prog_arg(const char *key __rte_unused,
	       const char *value, void *extra_args)
{
	char *path = extra_args;

	if (strnlen(value, PATH_MAX) == PATH_MAX) {
		AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
			   value, PATH_MAX);
		return -EINVAL;
	}

	if (access(value, F_OK) != 0) {
		AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
			   value, strerror(errno));
		return -EINVAL;
	}

	strlcpy(path, value, PATH_MAX);

	return 0;
}
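
/*
 * Illustrative sketch of how the parsers above are driven (names and values
 * are placeholders): for a device string such as
 *
 *   --vdev=net_af_xdp,iface=eth0,start_queue=0,xdp_prog=/tmp/prog.o
 *
 * rte_kvargs_parse() splits the string into key/value pairs and
 * rte_kvargs_process() calls the matching handler once per pair, e.g.
 *
 *   parse_name_arg("iface", "eth0", if_name);
 *   parse_integer_arg("start_queue", "0", &start_queue);
 *   parse_prog_arg("xdp_prog", "/tmp/prog.o", prog_path);
 */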

static int
xdp_get_channels_info(const char *if_name, int *max_queues,
				int *combined_queues)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&channels, 0, sizeof(channels));
	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	if (ret) {
		if (errno == EOPNOTSUPP) {
			/* Not fatal: fall through with the zeroed channels
			 * struct so the single-queue default below is used.
			 */
			ret = 0;
		} else {
			ret = -errno;
			goto out;
		}
	}

	if (channels.max_combined == 0) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		*max_queues = 1;
		*combined_queues = 1;
	} else {
		*max_queues = channels.max_combined;
		*combined_queues = channels.combined_count;
	}

out:
	close(fd);
	return ret;
}
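
/*
 * Illustrative mapping (interface name and counts are placeholders): the
 * values read above mirror what "ethtool -l eth0" reports, e.g.
 *
 *   Pre-set maximums: Combined: 8  ->  *max_queues = 8
 *   Current settings: Combined: 4  ->  *combined_queues = 4
 *
 * A driver without channel support is treated as a single-queue device.
 */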

/* Note: consumes kvlist - it is freed on both the success and error paths. */
static int
parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
			int *queue_cnt, int *shared_umem, char *prog_path,
			int *busy_budget)
{
	int ret;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
				 &parse_name_arg, if_name);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
				 &parse_integer_arg, start_queue);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
				 &parse_integer_arg, queue_cnt);
	if (ret < 0 || *queue_cnt <= 0) {
		ret = -EINVAL;
		goto free_kvlist;
	}

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
				&parse_integer_arg, shared_umem);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
				 &parse_prog_arg, prog_path);
	if (ret < 0)
		goto free_kvlist;

	ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
				&parse_budget_arg, busy_budget);
	if (ret < 0)
		goto free_kvlist;

free_kvlist:
	rte_kvargs_free(kvlist);
	return ret;
}

static int
get_iface_info(const char *if_name,
	       struct rte_ether_addr *eth_addr,
	       int *if_index)
{
	struct ifreq ifr;
	int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);

	if (sock < 0)
		return -1;

	strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
	if (ioctl(sock, SIOCGIFINDEX, &ifr))
		goto error;

	*if_index = ifr.ifr_ifindex;

	if (ioctl(sock, SIOCGIFHWADDR, &ifr))
		goto error;

	rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);

	close(sock);
	return 0;

error:
	close(sock);
	return -1;
}
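
/*
 * Illustrative usage (names are placeholders): after a successful call,
 * if_index and eth_addr mirror the kernel's view of the interface:
 *
 *   struct rte_ether_addr addr;
 *   int idx;
 *
 *   if (get_iface_info("eth0", &addr, &idx) == 0)
 *           AF_XDP_LOG(INFO, "ifindex %d\n", idx);
 */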

static struct rte_eth_dev *
init_internals(struct rte_vdev_device *dev, const char *if_name,
		int start_queue_idx, int queue_cnt, int shared_umem,
		const char *prog_path, int busy_budget)
{
	const char *name = rte_vdev_device_name(dev);
	const unsigned int numa_node = dev->device.numa_node;
	struct pmd_internals *internals;
	struct rte_eth_dev *eth_dev;
	int ret;
	int i;

	internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
	if (internals == NULL)
		return NULL;

	internals->start_queue_idx = start_queue_idx;
	internals->queue_cnt = queue_cnt;
	strlcpy(internals->if_name, if_name, IFNAMSIZ);
	strlcpy(internals->prog_path, prog_path, PATH_MAX);
	internals->custom_prog_configured = 0;

#ifndef ETH_AF_XDP_SHARED_UMEM
	if (shared_umem) {
		AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
				"Check kernel and libbpf version\n");
		goto err_free_internals;
	}
#endif
	internals->shared_umem = shared_umem;

	if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
				  &internals->combined_queue_cnt)) {
		AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
				if_name);
		goto err_free_internals;
	}

	if (queue_cnt > internals->combined_queue_cnt) {
		AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
				queue_cnt, internals->combined_queue_cnt);
		goto err_free_internals;
	}

	internals->rx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_rx_queue) * queue_cnt,
					0, numa_node);
	if (internals->rx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
		goto err_free_internals;
	}

	internals->tx_queues = rte_zmalloc_socket(NULL,
					sizeof(struct pkt_tx_queue) * queue_cnt,
					0, numa_node);
	if (internals->tx_queues == NULL) {
		AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
		goto err_free_rx;
	}
	for (i = 0; i < queue_cnt; i++) {
		internals->tx_queues[i].pair = &internals->rx_queues[i];
		internals->rx_queues[i].pair = &internals->tx_queues[i];
		internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
		internals->rx_queues[i].busy_budget = busy_budget;
	}

	ret = get_iface_info(if_name, &internals->eth_addr,
			     &internals->if_index);
	if (ret)
		goto err_free_tx;

	eth_dev = rte_eth_vdev_allocate(dev, 0);
	if (eth_dev == NULL)
		goto err_free_tx;

	eth_dev->data->dev_private = internals;
	eth_dev->data->dev_link = pmd_link;
	eth_dev->data->mac_addrs = &internals->eth_addr;
	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	eth_dev->dev_ops = &ops;
	eth_dev->rx_pkt_burst = eth_af_xdp_rx;
	eth_dev->tx_pkt_burst = eth_af_xdp_tx;

#if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
	AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
#endif

	return eth_dev;

	/* Unwind in reverse order of allocation. */
err_free_tx:
	rte_free(internals->tx_queues);
err_free_rx:
	rte_free(internals->rx_queues);
err_free_internals:
	rte_free(internals);
	return NULL;
}
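
/*
 * Illustrative layout (values are placeholders): for start_queue_idx=2 and
 * queue_cnt=2, init_internals() pairs the queues as
 *
 *   rx_queues[0] <-> tx_queues[0], xsk_queue_idx = 2
 *   rx_queues[1] <-> tx_queues[1], xsk_queue_idx = 3
 *
 * so each rx/tx pair later shares one AF_XDP socket on the same netdev
 * queue.
 */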

static int
rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
{
	struct rte_kvargs *kvlist;
	char if_name[IFNAMSIZ] = {'\0'};
	int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
	int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
	int shared_umem = 0;
	char prog_path[PATH_MAX] = {'\0'};
	int busy_budget = -1;
	struct rte_eth_dev *eth_dev = NULL;
	const char *name;

	AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
		rte_vdev_device_name(dev));

	name = rte_vdev_device_name(dev);
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		AF_XDP_LOG(ERR, "Failed to probe %s. "
				"AF_XDP PMD does not support secondary processes.\n",
				name);
		return -ENOTSUP;
	}

	kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
	if (kvlist == NULL) {
		AF_XDP_LOG(ERR, "Invalid kvargs key\n");
		return -EINVAL;
	}

	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();

	if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
			     &xsk_queue_cnt, &shared_umem, prog_path,
			     &busy_budget) < 0) {
		AF_XDP_LOG(ERR, "Invalid kvargs value\n");
		return -EINVAL;
	}

	if (strlen(if_name) == 0) {
		AF_XDP_LOG(ERR, "Network interface must be specified\n");
		return -EINVAL;
	}

	busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
					busy_budget;

	eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
					xsk_queue_cnt, shared_umem, prog_path,
					busy_budget);
	if (eth_dev == NULL) {
		AF_XDP_LOG(ERR, "Failed to init internals\n");
		return -1;
	}

	rte_eth_dev_probing_finish(eth_dev);

	return 0;
}

static int
rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
{
	struct rte_eth_dev *eth_dev = NULL;

	AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
		rte_socket_id());

	if (dev == NULL)
		return -1;

	/* find the ethdev entry */
	eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
	if (eth_dev == NULL)
		return 0;

	eth_dev_close(eth_dev);
	rte_eth_dev_release_port(eth_dev);

	return 0;
}

static struct rte_vdev_driver pmd_af_xdp_drv = {
	.probe = rte_pmd_af_xdp_probe,
	.remove = rte_pmd_af_xdp_remove,
};

RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
			      "iface=<string> "
			      "start_queue=<int> "
			      "queue_count=<int> "
			      "shared_umem=<int> "
			      "xdp_prog=<string> "
			      "busy_budget=<int>");
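
/*
 * Illustrative invocation (interface name is a placeholder); iface is the
 * only mandatory key, the others fall back to the defaults above:
 *
 *   dpdk-testpmd --vdev=net_af_xdp,iface=ens786f1,queue_count=1 -- -i
 */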