net/af_xdp: fix build with Linux < 5.4
drivers/net/af_xdp/rte_eth_af_xdp.c (dpdk.git)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019-2020 Intel Corporation.
3  */
4 #include <unistd.h>
5 #include <errno.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <netinet/in.h>
9 #include <net/if.h>
10 #include <sys/socket.h>
11 #include <sys/ioctl.h>
12 #include <linux/if_ether.h>
13 #include <linux/if_xdp.h>
14 #include <linux/if_link.h>
15 #include <linux/ethtool.h>
16 #include <linux/sockios.h>
17 #include "af_xdp_deps.h"
18 #include <bpf/xsk.h>
19
20 #include <rte_ethdev.h>
21 #include <ethdev_driver.h>
22 #include <ethdev_vdev.h>
23 #include <rte_kvargs.h>
24 #include <rte_bus_vdev.h>
25 #include <rte_string_fns.h>
26 #include <rte_branch_prediction.h>
27 #include <rte_common.h>
28 #include <rte_dev.h>
29 #include <rte_eal.h>
30 #include <rte_ether.h>
31 #include <rte_lcore.h>
32 #include <rte_log.h>
33 #include <rte_memory.h>
34 #include <rte_memzone.h>
35 #include <rte_mempool.h>
36 #include <rte_mbuf.h>
37 #include <rte_malloc.h>
38 #include <rte_ring.h>
39 #include <rte_spinlock.h>
40
41 #include "compat.h"
42
43 #ifndef SO_PREFER_BUSY_POLL
44 #define SO_PREFER_BUSY_POLL 69
45 #endif
46 #ifndef SO_BUSY_POLL_BUDGET
47 #define SO_BUSY_POLL_BUDGET 70
48 #endif
49
50
51 #ifndef SOL_XDP
52 #define SOL_XDP 283
53 #endif
54
55 #ifndef AF_XDP
56 #define AF_XDP 44
57 #endif
58
59 #ifndef PF_XDP
60 #define PF_XDP AF_XDP
61 #endif
62
63 RTE_LOG_REGISTER_DEFAULT(af_xdp_logtype, NOTICE);
64
65 #define AF_XDP_LOG(level, fmt, args...)                 \
66         rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
67                 "%s(): " fmt, __func__, ##args)
68
69 #define ETH_AF_XDP_FRAME_SIZE           2048
70 #define ETH_AF_XDP_NUM_BUFFERS          4096
71 #define ETH_AF_XDP_DFLT_NUM_DESCS       XSK_RING_CONS__DEFAULT_NUM_DESCS
72 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
73 #define ETH_AF_XDP_DFLT_QUEUE_COUNT     1
74 #define ETH_AF_XDP_DFLT_BUSY_BUDGET     64
75 #define ETH_AF_XDP_DFLT_BUSY_TIMEOUT    20
76
77 #define ETH_AF_XDP_RX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
78 #define ETH_AF_XDP_TX_BATCH_SIZE        XSK_RING_CONS__DEFAULT_NUM_DESCS
79
80
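/*
 * Note: this PMD has two build-time datapaths. When the kernel headers
 * provide XDP_UMEM_UNALIGNED_CHUNK_FLAG (added in Linux 5.4), the UMEM is
 * registered directly over the rte_mempool memory, so mbufs move to and
 * from the AF_XDP rings without copying. Otherwise a separate memzone plus
 * an rte_ring of fixed-size frame addresses (buf_ring) is used and every
 * packet is copied between the UMEM and the mbuf.
 */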
81 struct xsk_umem_info {
82         struct xsk_umem *umem;
83         struct rte_ring *buf_ring;
84         const struct rte_memzone *mz;
85         struct rte_mempool *mb_pool;
86         void *buffer;
87         uint8_t refcnt;
88         uint32_t max_xsks;
89 };
90
91 struct rx_stats {
92         uint64_t rx_pkts;
93         uint64_t rx_bytes;
94         uint64_t rx_dropped;
95 };
96
97 struct pkt_rx_queue {
98         struct xsk_ring_cons rx;
99         struct xsk_umem_info *umem;
100         struct xsk_socket *xsk;
101         struct rte_mempool *mb_pool;
102
103         struct rx_stats stats;
104
105         struct xsk_ring_prod fq;
106         struct xsk_ring_cons cq;
107
108         struct pkt_tx_queue *pair;
109         struct pollfd fds[1];
110         int xsk_queue_idx;
111         int busy_budget;
112 };
113
114 struct tx_stats {
115         uint64_t tx_pkts;
116         uint64_t tx_bytes;
117         uint64_t tx_dropped;
118 };
119
120 struct pkt_tx_queue {
121         struct xsk_ring_prod tx;
122         struct xsk_umem_info *umem;
123
124         struct tx_stats stats;
125
126         struct pkt_rx_queue *pair;
127         int xsk_queue_idx;
128 };
129
130 struct pmd_internals {
131         int if_index;
132         char if_name[IFNAMSIZ];
133         int start_queue_idx;
134         int queue_cnt;
135         int max_queue_cnt;
136         int combined_queue_cnt;
137         bool shared_umem;
138         char prog_path[PATH_MAX];
139         bool custom_prog_configured;
140
141         struct rte_ether_addr eth_addr;
142
143         struct pkt_rx_queue *rx_queues;
144         struct pkt_tx_queue *tx_queues;
145 };
146
147 #define ETH_AF_XDP_IFACE_ARG                    "iface"
148 #define ETH_AF_XDP_START_QUEUE_ARG              "start_queue"
149 #define ETH_AF_XDP_QUEUE_COUNT_ARG              "queue_count"
150 #define ETH_AF_XDP_SHARED_UMEM_ARG              "shared_umem"
151 #define ETH_AF_XDP_PROG_ARG                     "xdp_prog"
152 #define ETH_AF_XDP_BUDGET_ARG                   "busy_budget"
153
154 static const char * const valid_arguments[] = {
155         ETH_AF_XDP_IFACE_ARG,
156         ETH_AF_XDP_START_QUEUE_ARG,
157         ETH_AF_XDP_QUEUE_COUNT_ARG,
158         ETH_AF_XDP_SHARED_UMEM_ARG,
159         ETH_AF_XDP_PROG_ARG,
160         ETH_AF_XDP_BUDGET_ARG,
161         NULL
162 };
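/*
 * Illustrative devargs usage only (the interface name is a placeholder):
 *   --vdev net_af_xdp,iface=eth0,start_queue=0,queue_count=1,busy_budget=64
 */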
163
164 static const struct rte_eth_link pmd_link = {
165         .link_speed = ETH_SPEED_NUM_10G,
166         .link_duplex = ETH_LINK_FULL_DUPLEX,
167         .link_status = ETH_LINK_DOWN,
168         .link_autoneg = ETH_LINK_AUTONEG
169 };
170
171 /* List which tracks PMDs to facilitate sharing UMEMs across them. */
172 struct internal_list {
173         TAILQ_ENTRY(internal_list) next;
174         struct rte_eth_dev *eth_dev;
175 };
176
177 TAILQ_HEAD(internal_list_head, internal_list);
178 static struct internal_list_head internal_list =
179         TAILQ_HEAD_INITIALIZER(internal_list);
180
181 static pthread_mutex_t internal_list_lock = PTHREAD_MUTEX_INITIALIZER;
182
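/*
 * Fill-queue replenishment helpers. The zero-copy variant places mempool
 * mbufs on the fill ring (each UMEM offset is derived from the mbuf
 * address); the copy-mode variant dequeues fixed-size frame addresses from
 * the buf_ring instead.
 */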
183 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
184 static inline int
185 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
186                       struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
187 {
188         uint32_t idx;
189         uint16_t i;
190
191         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
192                 for (i = 0; i < reserve_size; i++)
193                         rte_pktmbuf_free(bufs[i]);
194                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
195                 return -1;
196         }
197
198         for (i = 0; i < reserve_size; i++) {
199                 __u64 *fq_addr;
200                 uint64_t addr;
201
202                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
203                 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
204                                 umem->mb_pool->header_size;
205                 *fq_addr = addr;
206         }
207
208         xsk_ring_prod__submit(fq, reserve_size);
209
210         return 0;
211 }
212 #else
213 static inline int
214 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
215                       struct rte_mbuf **bufs __rte_unused,
216                       struct xsk_ring_prod *fq)
217 {
218         void *addrs[reserve_size];
219         uint32_t idx;
220         uint16_t i;
221
222         if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
223                     != reserve_size) {
224                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
225                 return -1;
226         }
227
228         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
229                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
230                 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
231                                 reserve_size, NULL);
232                 return -1;
233         }
234
235         for (i = 0; i < reserve_size; i++) {
236                 __u64 *fq_addr;
237
238                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
239                 *fq_addr = (uint64_t)addrs[i];
240         }
241
242         xsk_ring_prod__submit(fq, reserve_size);
243
244         return 0;
245 }
246 #endif
247
248 static inline int
249 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
250                    struct rte_mbuf **bufs, struct xsk_ring_prod *fq)
251 {
252 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
253         return reserve_fill_queue_zc(umem, reserve_size, bufs, fq);
254 #else
255         return reserve_fill_queue_cp(umem, reserve_size, bufs, fq);
256 #endif
257 }
258
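/*
 * Zero-copy RX: each completed RX descriptor points into the mempool-backed
 * UMEM, so it is converted straight back into its rte_mbuf and handed to
 * the caller; replacement mbufs are allocated up front so the fill queue
 * can be restocked once the batch is released.
 */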
259 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
260 static uint16_t
261 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
262 {
263         struct pkt_rx_queue *rxq = queue;
264         struct xsk_ring_cons *rx = &rxq->rx;
265         struct xsk_ring_prod *fq = &rxq->fq;
266         struct xsk_umem_info *umem = rxq->umem;
267         uint32_t idx_rx = 0;
268         unsigned long rx_bytes = 0;
269         int i;
270         struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
271
272         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
273
274         if (nb_pkts == 0) {
275                 /* we can assume a kernel >= 5.11 is in use if busy polling is
276                  * enabled and thus we can safely use the recvfrom() syscall
277                  * which is only supported for AF_XDP sockets in kernels >=
278                  * 5.11.
279                  */
280                 if (rxq->busy_budget) {
281                         (void)recvfrom(xsk_socket__fd(rxq->xsk), NULL, 0,
282                                        MSG_DONTWAIT, NULL, NULL);
283                 } else if (xsk_ring_prod__needs_wakeup(fq)) {
284                         (void)poll(&rxq->fds[0], 1, 1000);
285                 }
286
287                 return 0;
288         }
289
290         /* allocate bufs for fill queue replenishment after rx */
291         if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
292                 AF_XDP_LOG(DEBUG,
293                         "Failed to get enough buffers for fq.\n");
294                 /* roll back cached_cons, which was advanced by
295                  * xsk_ring_cons__peek
296                  */
297                 rx->cached_cons -= nb_pkts;
298                 return 0;
299         }
300
301         for (i = 0; i < nb_pkts; i++) {
302                 const struct xdp_desc *desc;
303                 uint64_t addr;
304                 uint32_t len;
305                 uint64_t offset;
306
307                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
308                 addr = desc->addr;
309                 len = desc->len;
310
311                 offset = xsk_umem__extract_offset(addr);
312                 addr = xsk_umem__extract_addr(addr);
313
314                 bufs[i] = (struct rte_mbuf *)
315                                 xsk_umem__get_data(umem->buffer, addr +
316                                         umem->mb_pool->header_size);
317                 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
318                         rte_pktmbuf_priv_size(umem->mb_pool) -
319                         umem->mb_pool->header_size;
320
321                 rte_pktmbuf_pkt_len(bufs[i]) = len;
322                 rte_pktmbuf_data_len(bufs[i]) = len;
323                 rx_bytes += len;
324         }
325
326         xsk_ring_cons__release(rx, nb_pkts);
327         (void)reserve_fill_queue(umem, nb_pkts, fq_bufs, fq);
328
329         /* statistics */
330         rxq->stats.rx_pkts += nb_pkts;
331         rxq->stats.rx_bytes += rx_bytes;
332
333         return nb_pkts;
334 }
335 #else
336 static uint16_t
337 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
338 {
339         struct pkt_rx_queue *rxq = queue;
340         struct xsk_ring_cons *rx = &rxq->rx;
341         struct xsk_umem_info *umem = rxq->umem;
342         struct xsk_ring_prod *fq = &rxq->fq;
343         uint32_t idx_rx = 0;
344         unsigned long rx_bytes = 0;
345         int i;
346         uint32_t free_thresh = fq->size >> 1;
347         struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
348
349         if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
350                 (void)reserve_fill_queue(umem, nb_pkts, NULL, fq);
351
352         nb_pkts = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
353         if (nb_pkts == 0) {
354 #if defined(XDP_USE_NEED_WAKEUP)
355                 if (xsk_ring_prod__needs_wakeup(fq))
356                         (void)poll(rxq->fds, 1, 1000);
357 #endif
358                 return 0;
359         }
360
361         if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts))) {
362                 /* roll back cached_cons, which was advanced by
363                  * xsk_ring_cons__peek
364                  */
365                 rx->cached_cons -= nb_pkts;
366                 return 0;
367         }
368
369         for (i = 0; i < nb_pkts; i++) {
370                 const struct xdp_desc *desc;
371                 uint64_t addr;
372                 uint32_t len;
373                 void *pkt;
374
375                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
376                 addr = desc->addr;
377                 len = desc->len;
378                 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
379
380                 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
381                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
382                 rte_pktmbuf_pkt_len(mbufs[i]) = len;
383                 rte_pktmbuf_data_len(mbufs[i]) = len;
384                 rx_bytes += len;
385                 bufs[i] = mbufs[i];
386         }
387
388         xsk_ring_cons__release(rx, nb_pkts);
389
390         /* statistics */
391         rxq->stats.rx_pkts += nb_pkts;
392         rxq->stats.rx_bytes += rx_bytes;
393
394         return nb_pkts;
395 }
396 #endif
397
398 static uint16_t
399 af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
400 {
401 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
402         return af_xdp_rx_zc(queue, bufs, nb_pkts);
403 #else
404         return af_xdp_rx_cp(queue, bufs, nb_pkts);
405 #endif
406 }
407
408 static uint16_t
409 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
410 {
411         uint16_t nb_rx;
412
413         if (likely(nb_pkts <= ETH_AF_XDP_RX_BATCH_SIZE))
414                 return af_xdp_rx(queue, bufs, nb_pkts);
415
416         /* Split larger batch into smaller batches of size
417          * ETH_AF_XDP_RX_BATCH_SIZE or less.
418          */
419         nb_rx = 0;
420         while (nb_pkts) {
421                 uint16_t ret, n;
422
423                 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
424                 ret = af_xdp_rx(queue, &bufs[nb_rx], n);
425                 nb_rx = (uint16_t)(nb_rx + ret);
426                 nb_pkts = (uint16_t)(nb_pkts - ret);
427                 if (ret < n)
428                         break;
429         }
430
431         return nb_rx;
432 }
433
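/*
 * Reclaim completed TX buffers from the completion queue: freed back to the
 * mempool in the zero-copy build, returned to the buf_ring in copy mode.
 */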
434 static void
435 pull_umem_cq(struct xsk_umem_info *umem, int size, struct xsk_ring_cons *cq)
436 {
437         size_t i, n;
438         uint32_t idx_cq = 0;
439
440         n = xsk_ring_cons__peek(cq, size, &idx_cq);
441
442         for (i = 0; i < n; i++) {
443                 uint64_t addr;
444                 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
445 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
446                 addr = xsk_umem__extract_addr(addr);
447                 rte_pktmbuf_free((struct rte_mbuf *)
448                                         xsk_umem__get_data(umem->buffer,
449                                         addr + umem->mb_pool->header_size));
450 #else
451                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
452 #endif
453         }
454
455         xsk_ring_cons__release(cq, n);
456 }
457
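/*
 * Kick the kernel to process the TX ring when a wakeup is needed. EAGAIN is
 * handled by draining the completion queue to free space before retrying;
 * EBUSY and EINTR are simply retried, any other errno aborts the kick.
 */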
458 static void
459 kick_tx(struct pkt_tx_queue *txq, struct xsk_ring_cons *cq)
460 {
461         struct xsk_umem_info *umem = txq->umem;
462
463         pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
464
465         if (tx_syscall_needed(&txq->tx))
466                 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
467                             0, MSG_DONTWAIT) < 0) {
468                         /* something unexpected */
469                         if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
470                                 break;
471
472                         /* pull from completion queue to leave more space */
473                         if (errno == EAGAIN)
474                                 pull_umem_cq(umem,
475                                              XSK_RING_CONS__DEFAULT_NUM_DESCS,
476                                              cq);
477                 }
478 }
479
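/*
 * Zero-copy TX: an mbuf that already belongs to the UMEM mempool is placed
 * on the TX ring in place; an mbuf from any other pool is copied into a
 * freshly allocated UMEM mbuf first. The descriptor address carries the
 * chunk address with the data offset encoded via
 * XSK_UNALIGNED_BUF_OFFSET_SHIFT.
 */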
480 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
481 static uint16_t
482 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
483 {
484         struct pkt_tx_queue *txq = queue;
485         struct xsk_umem_info *umem = txq->umem;
486         struct rte_mbuf *mbuf;
487         unsigned long tx_bytes = 0;
488         int i;
489         uint32_t idx_tx;
490         uint16_t count = 0;
491         struct xdp_desc *desc;
492         uint64_t addr, offset;
493         struct xsk_ring_cons *cq = &txq->pair->cq;
494         uint32_t free_thresh = cq->size >> 1;
495
496         if (xsk_cons_nb_avail(cq, free_thresh) >= free_thresh)
497                 pull_umem_cq(umem, XSK_RING_CONS__DEFAULT_NUM_DESCS, cq);
498
499         for (i = 0; i < nb_pkts; i++) {
500                 mbuf = bufs[i];
501
502                 if (mbuf->pool == umem->mb_pool) {
503                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
504                                 kick_tx(txq, cq);
505                                 if (!xsk_ring_prod__reserve(&txq->tx, 1,
506                                                             &idx_tx))
507                                         goto out;
508                         }
509                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
510                         desc->len = mbuf->pkt_len;
511                         addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
512                                         umem->mb_pool->header_size;
513                         offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
514                                         (uint64_t)mbuf +
515                                         umem->mb_pool->header_size;
516                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
517                         desc->addr = addr | offset;
518                         count++;
519                 } else {
520                         struct rte_mbuf *local_mbuf =
521                                         rte_pktmbuf_alloc(umem->mb_pool);
522                         void *pkt;
523
524                         if (local_mbuf == NULL)
525                                 goto out;
526
527                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
528                                 rte_pktmbuf_free(local_mbuf);
529                                 kick_tx(txq, cq);
530                                 goto out;
531                         }
532
533                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
534                         desc->len = mbuf->pkt_len;
535
536                         addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
537                                         umem->mb_pool->header_size;
538                         offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
539                                         (uint64_t)local_mbuf +
540                                         umem->mb_pool->header_size;
541                         pkt = xsk_umem__get_data(umem->buffer, addr + offset);
542                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
543                         desc->addr = addr | offset;
544                         rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
545                                         desc->len);
546                         rte_pktmbuf_free(mbuf);
547                         count++;
548                 }
549
550                 tx_bytes += mbuf->pkt_len;
551         }
552
553         kick_tx(txq, cq);
554
555 out:
556         xsk_ring_prod__submit(&txq->tx, count);
557
558         txq->stats.tx_pkts += count;
559         txq->stats.tx_bytes += tx_bytes;
560         txq->stats.tx_dropped += nb_pkts - count;
561
562         return count;
563 }
564 #else
565 static uint16_t
566 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
567 {
568         struct pkt_tx_queue *txq = queue;
569         struct xsk_umem_info *umem = txq->umem;
570         struct rte_mbuf *mbuf;
571         void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
572         unsigned long tx_bytes = 0;
573         int i;
574         uint32_t idx_tx;
575         struct xsk_ring_cons *cq = &txq->pair->cq;
576
577         pull_umem_cq(umem, nb_pkts, cq);
578
579         nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
580                                         nb_pkts, NULL);
581         if (nb_pkts == 0)
582                 return 0;
583
584         if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
585                 kick_tx(txq, cq);
586                 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
587                 return 0;
588         }
589
590         for (i = 0; i < nb_pkts; i++) {
591                 struct xdp_desc *desc;
592                 void *pkt;
593
594                 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
595                 mbuf = bufs[i];
596                 desc->len = mbuf->pkt_len;
597
598                 desc->addr = (uint64_t)addrs[i];
599                 pkt = xsk_umem__get_data(umem->mz->addr,
600                                          desc->addr);
601                 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
602                 tx_bytes += mbuf->pkt_len;
603                 rte_pktmbuf_free(mbuf);
604         }
605
606         xsk_ring_prod__submit(&txq->tx, nb_pkts);
607
608         kick_tx(txq, cq);
609
610         txq->stats.tx_pkts += nb_pkts;
611         txq->stats.tx_bytes += tx_bytes;
612
613         return nb_pkts;
614 }
615
616 static uint16_t
617 af_xdp_tx_cp_batch(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
618 {
619         uint16_t nb_tx;
620
621         if (likely(nb_pkts <= ETH_AF_XDP_TX_BATCH_SIZE))
622                 return af_xdp_tx_cp(queue, bufs, nb_pkts);
623
624         nb_tx = 0;
625         while (nb_pkts) {
626                 uint16_t ret, n;
627
628                 /* Split larger batch into smaller batches of size
629                  * ETH_AF_XDP_TX_BATCH_SIZE or less.
630                  */
631                 n = (uint16_t)RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
632                 ret = af_xdp_tx_cp(queue, &bufs[nb_tx], n);
633                 nb_tx = (uint16_t)(nb_tx + ret);
634                 nb_pkts = (uint16_t)(nb_pkts - ret);
635                 if (ret < n)
636                         break;
637         }
638
639         return nb_tx;
640 }
641 #endif
642
643 static uint16_t
644 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
645 {
646 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
647         return af_xdp_tx_zc(queue, bufs, nb_pkts);
648 #else
649         return af_xdp_tx_cp_batch(queue, bufs, nb_pkts);
650 #endif
651 }
652
653 static int
654 eth_dev_start(struct rte_eth_dev *dev)
655 {
656         dev->data->dev_link.link_status = ETH_LINK_UP;
657
658         return 0;
659 }
660
661 /* This function gets called when the current port gets stopped. */
662 static int
663 eth_dev_stop(struct rte_eth_dev *dev)
664 {
665         dev->data->dev_link.link_status = ETH_LINK_DOWN;
666         return 0;
667 }
668
669 /* Find ethdev in list */
670 static inline struct internal_list *
671 find_internal_resource(struct pmd_internals *port_int)
672 {
673         int found = 0;
674         struct internal_list *list = NULL;
675
676         if (port_int == NULL)
677                 return NULL;
678
679         pthread_mutex_lock(&internal_list_lock);
680
681         TAILQ_FOREACH(list, &internal_list, next) {
682                 struct pmd_internals *list_int =
683                                 list->eth_dev->data->dev_private;
684                 if (list_int == port_int) {
685                         found = 1;
686                         break;
687                 }
688         }
689
690         pthread_mutex_unlock(&internal_list_lock);
691
692         if (!found)
693                 return NULL;
694
695         return list;
696 }
697
698 /* Check if the netdev,qid context already exists */
699 static inline bool
700 ctx_exists(struct pkt_rx_queue *rxq, const char *ifname,
701                 struct pkt_rx_queue *list_rxq, const char *list_ifname)
702 {
703         bool exists = false;
704
705         if (rxq->xsk_queue_idx == list_rxq->xsk_queue_idx &&
706                         !strncmp(ifname, list_ifname, IFNAMSIZ)) {
707                 AF_XDP_LOG(ERR, "ctx %s,%i already exists, cannot share umem\n",
708                                         ifname, rxq->xsk_queue_idx);
709                 exists = true;
710         }
711
712         return exists;
713 }
714
715 /* Get a pointer to an existing UMEM which overlays the rxq's mb_pool */
716 static inline int
717 get_shared_umem(struct pkt_rx_queue *rxq, const char *ifname,
718                         struct xsk_umem_info **umem)
719 {
720         struct internal_list *list;
721         struct pmd_internals *internals;
722         int i = 0, ret = 0;
723         struct rte_mempool *mb_pool = rxq->mb_pool;
724
725         if (mb_pool == NULL)
726                 return ret;
727
728         pthread_mutex_lock(&internal_list_lock);
729
730         TAILQ_FOREACH(list, &internal_list, next) {
731                 internals = list->eth_dev->data->dev_private;
732                 for (i = 0; i < internals->queue_cnt; i++) {
733                         struct pkt_rx_queue *list_rxq =
734                                                 &internals->rx_queues[i];
735                         if (rxq == list_rxq)
736                                 continue;
737                         if (mb_pool == internals->rx_queues[i].mb_pool) {
738                                 if (ctx_exists(rxq, ifname, list_rxq,
739                                                 internals->if_name)) {
740                                         ret = -1;
741                                         goto out;
742                                 }
743                                 if (__atomic_load_n(
744                                         &internals->rx_queues[i].umem->refcnt,
745                                                         __ATOMIC_ACQUIRE)) {
746                                         *umem = internals->rx_queues[i].umem;
747                                         goto out;
748                                 }
749                         }
750                 }
751         }
752
753 out:
754         pthread_mutex_unlock(&internal_list_lock);
755
756         return ret;
757 }
758
759 static int
760 eth_dev_configure(struct rte_eth_dev *dev)
761 {
762         struct pmd_internals *internal = dev->data->dev_private;
763
764         /* rx/tx must be paired */
765         if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
766                 return -EINVAL;
767
768         if (internal->shared_umem) {
769                 struct internal_list *list = NULL;
770                 const char *name = dev->device->name;
771
772                 /* Ensure PMD is not already inserted into the list */
773                 list = find_internal_resource(internal);
774                 if (list)
775                         return 0;
776
777                 list = rte_zmalloc_socket(name, sizeof(*list), 0,
778                                         dev->device->numa_node);
779                 if (list == NULL)
780                         return -1;
781
782                 list->eth_dev = dev;
783                 pthread_mutex_lock(&internal_list_lock);
784                 TAILQ_INSERT_TAIL(&internal_list, list, next);
785                 pthread_mutex_unlock(&internal_list_lock);
786         }
787
788         return 0;
789 }
790
791 static int
792 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
793 {
794         struct pmd_internals *internals = dev->data->dev_private;
795
796         dev_info->if_index = internals->if_index;
797         dev_info->max_mac_addrs = 1;
798         dev_info->max_rx_pktlen = ETH_FRAME_LEN;
799         dev_info->max_rx_queues = internals->queue_cnt;
800         dev_info->max_tx_queues = internals->queue_cnt;
801
802         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
803 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
804         dev_info->max_mtu = getpagesize() -
805                                 sizeof(struct rte_mempool_objhdr) -
806                                 sizeof(struct rte_mbuf) -
807                                 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
808 #else
809         dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
810 #endif
811
812         dev_info->default_rxportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
813         dev_info->default_txportconf.burst_size = ETH_AF_XDP_DFLT_BUSY_BUDGET;
814         dev_info->default_rxportconf.nb_queues = 1;
815         dev_info->default_txportconf.nb_queues = 1;
816         dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
817         dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
818
819         return 0;
820 }
821
822 static int
823 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
824 {
825         struct pmd_internals *internals = dev->data->dev_private;
826         struct xdp_statistics xdp_stats;
827         struct pkt_rx_queue *rxq;
828         struct pkt_tx_queue *txq;
829         socklen_t optlen;
830         int i, ret;
831
832         for (i = 0; i < dev->data->nb_rx_queues; i++) {
833                 optlen = sizeof(struct xdp_statistics);
834                 rxq = &internals->rx_queues[i];
835                 txq = rxq->pair;
836                 stats->q_ipackets[i] = rxq->stats.rx_pkts;
837                 stats->q_ibytes[i] = rxq->stats.rx_bytes;
838
839                 stats->q_opackets[i] = txq->stats.tx_pkts;
840                 stats->q_obytes[i] = txq->stats.tx_bytes;
841
842                 stats->ipackets += stats->q_ipackets[i];
843                 stats->ibytes += stats->q_ibytes[i];
844                 stats->imissed += rxq->stats.rx_dropped;
845                 stats->oerrors += txq->stats.tx_dropped;
846                 ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
847                                 XDP_STATISTICS, &xdp_stats, &optlen);
848                 if (ret != 0) {
849                         AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
850                         return -1;
851                 }
852                 stats->imissed += xdp_stats.rx_dropped;
853
854                 stats->opackets += stats->q_opackets[i];
855                 stats->obytes += stats->q_obytes[i];
856         }
857
858         return 0;
859 }
860
861 static int
862 eth_stats_reset(struct rte_eth_dev *dev)
863 {
864         struct pmd_internals *internals = dev->data->dev_private;
865         int i;
866
867         for (i = 0; i < internals->queue_cnt; i++) {
868                 memset(&internals->rx_queues[i].stats, 0,
869                                         sizeof(struct rx_stats));
870                 memset(&internals->tx_queues[i].stats, 0,
871                                         sizeof(struct tx_stats));
872         }
873
874         return 0;
875 }
876
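/*
 * Detach the XDP program from the interface by installing fd -1; the prior
 * lookup of the current program id only verifies that one can be read.
 */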
877 static void
878 remove_xdp_program(struct pmd_internals *internals)
879 {
880         uint32_t curr_prog_id = 0;
881
882         if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
883                                 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
884                 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
885                 return;
886         }
887         bpf_set_link_xdp_fd(internals->if_index, -1,
888                         XDP_FLAGS_UPDATE_IF_NOEXIST);
889 }
890
891 static void
892 xdp_umem_destroy(struct xsk_umem_info *umem)
893 {
894 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
895         umem->mb_pool = NULL;
896 #else
897         rte_memzone_free(umem->mz);
898         umem->mz = NULL;
899
900         rte_ring_free(umem->buf_ring);
901         umem->buf_ring = NULL;
902 #endif
903
904         rte_free(umem);
905 }
906
907 static int
908 eth_dev_close(struct rte_eth_dev *dev)
909 {
910         struct pmd_internals *internals = dev->data->dev_private;
911         struct pkt_rx_queue *rxq;
912         int i;
913
914         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
915                 return 0;
916
917         AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
918                 rte_socket_id());
919
920         for (i = 0; i < internals->queue_cnt; i++) {
921                 rxq = &internals->rx_queues[i];
922                 if (rxq->umem == NULL)
923                         break;
924                 xsk_socket__delete(rxq->xsk);
925
926                 if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE)
927                                 == 0) {
928                         (void)xsk_umem__delete(rxq->umem->umem);
929                         xdp_umem_destroy(rxq->umem);
930                 }
931
932                 /* free pkt_tx_queue */
933                 rte_free(rxq->pair);
934                 rte_free(rxq);
935         }
936
937         /*
938          * The MAC address is not allocated dynamically; setting the pointer
939          * to NULL prevents rte_eth_dev_release_port() from trying to free it.
940          */
941         dev->data->mac_addrs = NULL;
942
943         remove_xdp_program(internals);
944
945         if (internals->shared_umem) {
946                 struct internal_list *list;
947
948                 /* Remove ethdev from list used to track and share UMEMs */
949                 list = find_internal_resource(internals);
950                 if (list) {
951                         pthread_mutex_lock(&internal_list_lock);
952                         TAILQ_REMOVE(&internal_list, list, next);
953                         pthread_mutex_unlock(&internal_list_lock);
954                         rte_free(list);
955                 }
956         }
957
958         return 0;
959 }
960
961 static void
962 eth_queue_release(void *q __rte_unused)
963 {
964 }
965
966 static int
967 eth_link_update(struct rte_eth_dev *dev __rte_unused,
968                 int wait_to_complete __rte_unused)
969 {
970         return 0;
971 }
972
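/*
 * The zero-copy UMEM is registered over the mempool's first memory chunk,
 * aligned down to a page boundary; 'align' reports how far the chunk start
 * sits past that boundary so the UMEM size can be padded accordingly.
 */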
973 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
974 static inline uintptr_t get_base_addr(struct rte_mempool *mp, uint64_t *align)
975 {
976         struct rte_mempool_memhdr *memhdr;
977         uintptr_t memhdr_addr, aligned_addr;
978
979         memhdr = STAILQ_FIRST(&mp->mem_list);
980         memhdr_addr = (uintptr_t)memhdr->addr;
981         aligned_addr = memhdr_addr & ~(getpagesize() - 1);
982         *align = memhdr_addr - aligned_addr;
983
984         return aligned_addr;
985 }
986
987 static struct
988 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
989                                   struct pkt_rx_queue *rxq)
990 {
991         struct xsk_umem_info *umem = NULL;
992         int ret;
993         struct xsk_umem_config usr_config = {
994                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS * 2,
995                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
996                 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
997         void *base_addr = NULL;
998         struct rte_mempool *mb_pool = rxq->mb_pool;
999         uint64_t umem_size, align = 0;
1000
1001         if (internals->shared_umem) {
1002                 if (get_shared_umem(rxq, internals->if_name, &umem) < 0)
1003                         return NULL;
1004
1005                 if (umem != NULL &&
1006                         __atomic_load_n(&umem->refcnt, __ATOMIC_ACQUIRE) <
1007                                         umem->max_xsks) {
1008                         AF_XDP_LOG(INFO, "%s,qid%i sharing UMEM\n",
1009                                         internals->if_name, rxq->xsk_queue_idx);
1010                         __atomic_fetch_add(&umem->refcnt, 1, __ATOMIC_ACQUIRE);
1011                 }
1012         }
1013
1014         if (umem == NULL) {
1015                 usr_config.frame_size =
1016                         rte_mempool_calc_obj_size(mb_pool->elt_size,
1017                                                   mb_pool->flags, NULL);
1018                 usr_config.frame_headroom = mb_pool->header_size +
1019                                                 sizeof(struct rte_mbuf) +
1020                                                 rte_pktmbuf_priv_size(mb_pool) +
1021                                                 RTE_PKTMBUF_HEADROOM;
1022
1023                 umem = rte_zmalloc_socket("umem", sizeof(*umem), 0,
1024                                           rte_socket_id());
1025                 if (umem == NULL) {
1026                         AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1027                         return NULL;
1028                 }
1029
1030                 umem->mb_pool = mb_pool;
1031                 base_addr = (void *)get_base_addr(mb_pool, &align);
1032                 umem_size = (uint64_t)mb_pool->populated_size *
1033                                 (uint64_t)usr_config.frame_size +
1034                                 align;
1035
1036                 ret = xsk_umem__create(&umem->umem, base_addr, umem_size,
1037                                 &rxq->fq, &rxq->cq, &usr_config);
1038                 if (ret) {
1039                         AF_XDP_LOG(ERR, "Failed to create umem\n");
1040                         goto err;
1041                 }
1042                 umem->buffer = base_addr;
1043
1044                 if (internals->shared_umem) {
1045                         umem->max_xsks = mb_pool->populated_size /
1046                                                 ETH_AF_XDP_NUM_BUFFERS;
1047                         AF_XDP_LOG(INFO, "Max xsks for UMEM %s: %u\n",
1048                                                 mb_pool->name, umem->max_xsks);
1049                 }
1050
1051                 __atomic_store_n(&umem->refcnt, 1, __ATOMIC_RELEASE);
1052         }
1053
1054 #else
1055 static struct
1056 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
1057                                   struct pkt_rx_queue *rxq)
1058 {
1059         struct xsk_umem_info *umem;
1060         const struct rte_memzone *mz;
1061         struct xsk_umem_config usr_config = {
1062                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1063                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
1064                 .frame_size = ETH_AF_XDP_FRAME_SIZE,
1065                 .frame_headroom = 0 };
1066         char ring_name[RTE_RING_NAMESIZE];
1067         char mz_name[RTE_MEMZONE_NAMESIZE];
1068         int ret;
1069         uint64_t i;
1070
1071         umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
1072         if (umem == NULL) {
1073                 AF_XDP_LOG(ERR, "Failed to allocate umem info\n");
1074                 return NULL;
1075         }
1076
1077         snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
1078                        internals->if_name, rxq->xsk_queue_idx);
1079         umem->buf_ring = rte_ring_create(ring_name,
1080                                          ETH_AF_XDP_NUM_BUFFERS,
1081                                          rte_socket_id(),
1082                                          0x0);
1083         if (umem->buf_ring == NULL) {
1084                 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
1085                 goto err;
1086         }
1087
1088         for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
1089                 rte_ring_enqueue(umem->buf_ring,
1090                                  (void *)(i * ETH_AF_XDP_FRAME_SIZE));
1091
1092         snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
1093                        internals->if_name, rxq->xsk_queue_idx);
1094         mz = rte_memzone_reserve_aligned(mz_name,
1095                         ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1096                         rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
1097                         getpagesize());
1098         if (mz == NULL) {
1099                 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
1100                 goto err;
1101         }
1102
1103         ret = xsk_umem__create(&umem->umem, mz->addr,
1104                                ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
1105                                &rxq->fq, &rxq->cq,
1106                                &usr_config);
1107
1108         if (ret) {
1109                 AF_XDP_LOG(ERR, "Failed to create umem\n");
1110                 goto err;
1111         }
1112         umem->mz = mz;
1113
1114 #endif
1115         return umem;
1116
1117 err:
1118         xdp_umem_destroy(umem);
1119         return NULL;
1120 }
1121
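/*
 * Load a user-supplied XDP program onto the interface. The program must
 * expose an "xsks_map" map so that redirected traffic can reach the AF_XDP
 * sockets; libbpf inserts each created xsk into that map.
 */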
1122 static int
1123 load_custom_xdp_prog(const char *prog_path, int if_index)
1124 {
1125         int ret, prog_fd = -1;
1126         struct bpf_object *obj;
1127         struct bpf_map *map;
1128
1129         ret = bpf_prog_load(prog_path, BPF_PROG_TYPE_XDP, &obj, &prog_fd);
1130         if (ret) {
1131                 AF_XDP_LOG(ERR, "Failed to load program %s\n", prog_path);
1132                 return ret;
1133         }
1134
1135         /*
1136          * The loaded program must provision for a map of xsks, such that some
1137          * traffic can be redirected to userspace. When the xsk is created,
1138          * libbpf inserts it into the map.
1139          */
1140         map = bpf_object__find_map_by_name(obj, "xsks_map");
1141         if (!map) {
1142                 AF_XDP_LOG(ERR, "Failed to find xsks_map in %s\n", prog_path);
1143                 return -1;
1144         }
1145
1146         /* Link the program with the given network device */
1147         ret = bpf_set_link_xdp_fd(if_index, prog_fd,
1148                                         XDP_FLAGS_UPDATE_IF_NOEXIST);
1149         if (ret) {
1150                 AF_XDP_LOG(ERR, "Failed to set prog fd %d on interface\n",
1151                                 prog_fd);
1152                 return -1;
1153         }
1154
1155         AF_XDP_LOG(INFO, "Successfully loaded XDP program %s with fd %d\n",
1156                                 prog_path, prog_fd);
1157
1158         return 0;
1159 }
1160
1161 /* Detect support for busy polling through setsockopt(). */
1162 static int
1163 configure_preferred_busy_poll(struct pkt_rx_queue *rxq)
1164 {
1165         int sock_opt = 1;
1166         int fd = xsk_socket__fd(rxq->xsk);
1167         int ret = 0;
1168
1169         ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1170                         (void *)&sock_opt, sizeof(sock_opt));
1171         if (ret < 0) {
1172                 AF_XDP_LOG(DEBUG, "Failed to set SO_PREFER_BUSY_POLL\n");
1173                 goto err_prefer;
1174         }
1175
1176         sock_opt = ETH_AF_XDP_DFLT_BUSY_TIMEOUT;
1177         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1178                         sizeof(sock_opt));
1179         if (ret < 0) {
1180                 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL\n");
1181                 goto err_timeout;
1182         }
1183
1184         sock_opt = rxq->busy_budget;
1185         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL_BUDGET,
1186                         (void *)&sock_opt, sizeof(sock_opt));
1187         if (ret < 0) {
1188                 AF_XDP_LOG(DEBUG, "Failed to set SO_BUSY_POLL_BUDGET\n");
1189         } else {
1190                 AF_XDP_LOG(INFO, "Busy polling budget set to: %u\n",
1191                                         rxq->busy_budget);
1192                 return 0;
1193         }
1194
1195         /* setsockopt failure - attempt to restore xsk to default state and
1196          * proceed without busy polling support.
1197          */
1198         sock_opt = 0;
1199         ret = setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL, (void *)&sock_opt,
1200                         sizeof(sock_opt));
1201         if (ret < 0) {
1202                 AF_XDP_LOG(ERR, "Failed to unset SO_BUSY_POLL\n");
1203                 return -1;
1204         }
1205
1206 err_timeout:
1207         sock_opt = 0;
1208         ret = setsockopt(fd, SOL_SOCKET, SO_PREFER_BUSY_POLL,
1209                         (void *)&sock_opt, sizeof(sock_opt));
1210         if (ret < 0) {
1211                 AF_XDP_LOG(ERR, "Failed to unset SO_PREFER_BUSY_POLL\n");
1212                 return -1;
1213         }
1214
1215 err_prefer:
1216         rxq->busy_budget = 0;
1217         return 0;
1218 }
1219
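/*
 * Per-queue socket setup: configure (or attach to a shared) UMEM, load a
 * custom XDP program if one was supplied, create the xsk, try to enable
 * preferred busy polling when a budget was given, and pre-populate the
 * fill queue.
 */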
1220 static int
1221 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
1222               int ring_size)
1223 {
1224         struct xsk_socket_config cfg;
1225         struct pkt_tx_queue *txq = rxq->pair;
1226         int ret = 0;
1227         int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS;
1228         struct rte_mbuf *fq_bufs[reserve_size];
1229
1230         rxq->umem = xdp_umem_configure(internals, rxq);
1231         if (rxq->umem == NULL)
1232                 return -ENOMEM;
1233         txq->umem = rxq->umem;
1234
1235         cfg.rx_size = ring_size;
1236         cfg.tx_size = ring_size;
1237         cfg.libbpf_flags = 0;
1238         cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
1239         cfg.bind_flags = 0;
1240
1241 #if defined(XDP_USE_NEED_WAKEUP)
1242         cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
1243 #endif
1244
1245         if (strnlen(internals->prog_path, PATH_MAX) &&
1246                                 !internals->custom_prog_configured) {
1247                 ret = load_custom_xdp_prog(internals->prog_path,
1248                                            internals->if_index);
1249                 if (ret) {
1250                         AF_XDP_LOG(ERR, "Failed to load custom XDP program %s\n",
1251                                         internals->prog_path);
1252                         goto err;
1253                 }
1254                 internals->custom_prog_configured = 1;
1255         }
1256
1257         if (internals->shared_umem)
1258                 ret = create_shared_socket(&rxq->xsk, internals->if_name,
1259                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1260                                 &txq->tx, &rxq->fq, &rxq->cq, &cfg);
1261         else
1262                 ret = xsk_socket__create(&rxq->xsk, internals->if_name,
1263                                 rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
1264                                 &txq->tx, &cfg);
1265
1266         if (ret) {
1267                 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
1268                 goto err;
1269         }
1270
1271 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1272         ret = rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size);
1273         if (ret) {
1274                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
1275                 goto err;
1276         }
1277 #endif
1278
1279         if (rxq->busy_budget) {
1280                 ret = configure_preferred_busy_poll(rxq);
1281                 if (ret) {
1282                         AF_XDP_LOG(ERR, "Failed to configure busy polling.\n");
1283                         goto err;
1284                 }
1285         }
1286
1287         ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs, &rxq->fq);
1288         if (ret) {
1289                 xsk_socket__delete(rxq->xsk);
1290                 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
1291                 goto err;
1292         }
1293
1294         return 0;
1295
1296 err:
1297         if (__atomic_sub_fetch(&rxq->umem->refcnt, 1, __ATOMIC_ACQUIRE) == 0)
1298                 xdp_umem_destroy(rxq->umem);
1299
1300         return ret;
1301 }
1302
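/*
 * In the copy-mode build the mbuf data room must fit a whole
 * ETH_AF_XDP_FRAME_SIZE frame, because received frames are copied out of
 * the fixed-size UMEM into the mbuf.
 */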
1303 static int
1304 eth_rx_queue_setup(struct rte_eth_dev *dev,
1305                    uint16_t rx_queue_id,
1306                    uint16_t nb_rx_desc,
1307                    unsigned int socket_id __rte_unused,
1308                    const struct rte_eth_rxconf *rx_conf __rte_unused,
1309                    struct rte_mempool *mb_pool)
1310 {
1311         struct pmd_internals *internals = dev->data->dev_private;
1312         struct pkt_rx_queue *rxq;
1313         int ret;
1314
1315         rxq = &internals->rx_queues[rx_queue_id];
1316
1317         AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
1318                    rx_queue_id, rxq->xsk_queue_idx);
1319
1320 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
1321         uint32_t buf_size, data_size;
1322
1323         /* Now get the space available for data in the mbuf */
1324         buf_size = rte_pktmbuf_data_room_size(mb_pool) -
1325                 RTE_PKTMBUF_HEADROOM;
1326         data_size = ETH_AF_XDP_FRAME_SIZE;
1327
1328         if (data_size > buf_size) {
1329                 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
1330                         dev->device->name, data_size, buf_size);
1331                 ret = -ENOMEM;
1332                 goto err;
1333         }
1334 #endif
1335
1336         rxq->mb_pool = mb_pool;
1337
1338         if (xsk_configure(internals, rxq, nb_rx_desc)) {
1339                 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
1340                 ret = -EINVAL;
1341                 goto err;
1342         }
1343
1344         if (!rxq->busy_budget)
1345                 AF_XDP_LOG(DEBUG, "Preferred busy polling not enabled\n");
1346
1347         rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
1348         rxq->fds[0].events = POLLIN;
1349
1350         dev->data->rx_queues[rx_queue_id] = rxq;
1351         return 0;
1352
1353 err:
1354         return ret;
1355 }
1356
1357 static int
1358 eth_tx_queue_setup(struct rte_eth_dev *dev,
1359                    uint16_t tx_queue_id,
1360                    uint16_t nb_tx_desc __rte_unused,
1361                    unsigned int socket_id __rte_unused,
1362                    const struct rte_eth_txconf *tx_conf __rte_unused)
1363 {
1364         struct pmd_internals *internals = dev->data->dev_private;
1365         struct pkt_tx_queue *txq;
1366
1367         txq = &internals->tx_queues[tx_queue_id];
1368
1369         dev->data->tx_queues[tx_queue_id] = txq;
1370         return 0;
1371 }
1372
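/* MTU changes are forwarded to the kernel netdev via the SIOCSIFMTU ioctl. */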
1373 static int
1374 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
1375 {
1376         struct pmd_internals *internals = dev->data->dev_private;
1377         struct ifreq ifr = { .ifr_mtu = mtu };
1378         int ret;
1379         int s;
1380
1381         s = socket(PF_INET, SOCK_DGRAM, 0);
1382         if (s < 0)
1383                 return -EINVAL;
1384
1385         strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
1386         ret = ioctl(s, SIOCSIFMTU, &ifr);
1387         close(s);
1388
1389         return (ret < 0) ? -errno : 0;
1390 }
1391
1392 static int
1393 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1394 {
1395         struct ifreq ifr;
1396         int ret = 0;
1397         int s;
1398
1399         s = socket(PF_INET, SOCK_DGRAM, 0);
1400         if (s < 0)
1401                 return -errno;
1402
1403         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1404         if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1405                 ret = -errno;
1406                 goto out;
1407         }
1408         ifr.ifr_flags &= mask;
1409         ifr.ifr_flags |= flags;
1410         if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1411                 ret = -errno;
1412                 goto out;
1413         }
1414 out:
1415         close(s);
1416         return ret;
1417 }
1418
1419 static int
1420 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1421 {
1422         struct pmd_internals *internals = dev->data->dev_private;
1423
1424         return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1425 }
1426
1427 static int
1428 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1429 {
1430         struct pmd_internals *internals = dev->data->dev_private;
1431
1432         return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1433 }
1434
1435 static const struct eth_dev_ops ops = {
1436         .dev_start = eth_dev_start,
1437         .dev_stop = eth_dev_stop,
1438         .dev_close = eth_dev_close,
1439         .dev_configure = eth_dev_configure,
1440         .dev_infos_get = eth_dev_info,
1441         .mtu_set = eth_dev_mtu_set,
1442         .promiscuous_enable = eth_dev_promiscuous_enable,
1443         .promiscuous_disable = eth_dev_promiscuous_disable,
1444         .rx_queue_setup = eth_rx_queue_setup,
1445         .tx_queue_setup = eth_tx_queue_setup,
1446         .rx_queue_release = eth_queue_release,
1447         .tx_queue_release = eth_queue_release,
1448         .link_update = eth_link_update,
1449         .stats_get = eth_stats_get,
1450         .stats_reset = eth_stats_reset,
1451 };
1452
1453 /** parse busy_budget argument */
1454 static int
1455 parse_budget_arg(const char *key __rte_unused,
1456                   const char *value, void *extra_args)
1457 {
1458         int *i = (int *)extra_args;
1459         char *end;
1460
1461         *i = strtol(value, &end, 10);
1462         if (*i < 0 || *i > UINT16_MAX) {
1463                 AF_XDP_LOG(ERR, "Invalid busy_budget %i, must be >= 0 and <= %u\n",
1464                                 *i, UINT16_MAX);
1465                 return -EINVAL;
1466         }
1467
1468         return 0;
1469 }
1470
1471 /** parse integer from integer argument */
1472 static int
1473 parse_integer_arg(const char *key __rte_unused,
1474                   const char *value, void *extra_args)
1475 {
1476         int *i = (int *)extra_args;
1477         char *end;
1478
1479         *i = strtol(value, &end, 10);
1480         if (*i < 0) {
1481                 AF_XDP_LOG(ERR, "Argument has to be non-negative.\n");
1482                 return -EINVAL;
1483         }
1484
1485         return 0;
1486 }
1487
1488 /** parse name argument */
1489 static int
1490 parse_name_arg(const char *key __rte_unused,
1491                const char *value, void *extra_args)
1492 {
1493         char *name = extra_args;
1494
1495         if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1496                 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1497                            value, IFNAMSIZ);
1498                 return -EINVAL;
1499         }
1500
1501         strlcpy(name, value, IFNAMSIZ);
1502
1503         return 0;
1504 }
1505
1506 /** parse xdp prog argument */
1507 static int
1508 parse_prog_arg(const char *key __rte_unused,
1509                const char *value, void *extra_args)
1510 {
1511         char *path = extra_args;
1512
1513         if (strnlen(value, PATH_MAX) == PATH_MAX) {
1514                 AF_XDP_LOG(ERR, "Invalid path %s, should be less than %u bytes.\n",
1515                            value, PATH_MAX);
1516                 return -EINVAL;
1517         }
1518
1519         if (access(value, F_OK) != 0) {
1520                 AF_XDP_LOG(ERR, "Error accessing %s: %s\n",
1521                            value, strerror(errno));
1522                 return -EINVAL;
1523         }
1524
1525         strlcpy(path, value, PATH_MAX);
1526
1527         return 0;
1528 }
1529
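/*
 * Query the netdev's channel layout with ETHTOOL_GCHANNELS; drivers that do
 * not support the ioctl are treated as single-queue devices.
 */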
1530 static int
1531 xdp_get_channels_info(const char *if_name, int *max_queues,
1532                                 int *combined_queues)
1533 {
1534         struct ethtool_channels channels;
1535         struct ifreq ifr;
1536         int fd, ret;
1537
1538         fd = socket(AF_INET, SOCK_DGRAM, 0);
1539         if (fd < 0)
1540                 return -1;
1541
1542         channels.cmd = ETHTOOL_GCHANNELS;
1543         ifr.ifr_data = (void *)&channels;
1544         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1545         ret = ioctl(fd, SIOCETHTOOL, &ifr);
1546         if (ret) {
1547                 if (errno == EOPNOTSUPP) {
1548                         ret = 0;
1549                 } else {
1550                         ret = -errno;
1551                         goto out;
1552                 }
1553         }
1554
1555         if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1556                 /* If the device says it has no channels, then all traffic
1557                  * is sent to a single stream, so max queues = 1.
1558                  */
1559                 *max_queues = 1;
1560                 *combined_queues = 1;
1561         } else {
1562                 *max_queues = channels.max_combined;
1563                 *combined_queues = channels.combined_count;
1564         }
1565
1566  out:
1567         close(fd);
1568         return ret;
1569 }
1570
1571 static int
1572 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1573                         int *queue_cnt, int *shared_umem, char *prog_path,
1574                         int *busy_budget)
1575 {
1576         int ret;
1577
1578         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1579                                  &parse_name_arg, if_name);
1580         if (ret < 0)
1581                 goto free_kvlist;
1582
1583         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1584                                  &parse_integer_arg, start_queue);
1585         if (ret < 0)
1586                 goto free_kvlist;
1587
1588         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1589                                  &parse_integer_arg, queue_cnt);
1590         if (ret < 0 || *queue_cnt <= 0) {
1591                 ret = -EINVAL;
1592                 goto free_kvlist;
1593         }
1594
1595         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_SHARED_UMEM_ARG,
1596                                 &parse_integer_arg, shared_umem);
1597         if (ret < 0)
1598                 goto free_kvlist;
1599
1600         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_PROG_ARG,
1601                                  &parse_prog_arg, prog_path);
1602         if (ret < 0)
1603                 goto free_kvlist;
1604
1605         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_BUDGET_ARG,
1606                                 &parse_budget_arg, busy_budget);
1607         if (ret < 0)
1608                 goto free_kvlist;
1609
1610 free_kvlist:
1611         rte_kvargs_free(kvlist);
1612         return ret;
1613 }
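
/*
 * Note: parse_parameters() always frees the kvlist, on success and on
 * error, so the caller must not call rte_kvargs_free() on it again.
 * Arguments that are not supplied keep the caller's defaults: start_queue
 * 0, queue_count 1, shared_umem 0, an empty xdp_prog path and busy_budget
 * -1 (later replaced with ETH_AF_XDP_DFLT_BUSY_BUDGET by the caller).
 */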
1614
1615 static int
1616 get_iface_info(const char *if_name,
1617                struct rte_ether_addr *eth_addr,
1618                int *if_index)
1619 {
1620         struct ifreq ifr;
1621         int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1622
1623         if (sock < 0)
1624                 return -1;
1625
1626         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1627         if (ioctl(sock, SIOCGIFINDEX, &ifr))
1628                 goto error;
1629
1630         *if_index = ifr.ifr_ifindex;
1631
1632         if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1633                 goto error;
1634
1635         rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1636
1637         close(sock);
1638         return 0;
1639
1640 error:
1641         close(sock);
1642         return -1;
1643 }
1644
1645 static struct rte_eth_dev *
1646 init_internals(struct rte_vdev_device *dev, const char *if_name,
1647                 int start_queue_idx, int queue_cnt, int shared_umem,
1648                 const char *prog_path, int busy_budget)
1649 {
1650         const char *name = rte_vdev_device_name(dev);
1651         const unsigned int numa_node = dev->device.numa_node;
1652         struct pmd_internals *internals;
1653         struct rte_eth_dev *eth_dev;
1654         int ret;
1655         int i;
1656
1657         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1658         if (internals == NULL)
1659                 return NULL;
1660
1661         internals->start_queue_idx = start_queue_idx;
1662         internals->queue_cnt = queue_cnt;
1663         strlcpy(internals->if_name, if_name, IFNAMSIZ);
1664         strlcpy(internals->prog_path, prog_path, PATH_MAX);
1665         internals->custom_prog_configured = 0;
1666
1667 #ifndef ETH_AF_XDP_SHARED_UMEM
1668         if (shared_umem) {
1669                 AF_XDP_LOG(ERR, "Shared UMEM feature not available. "
1670                                 "Check kernel and libbpf version\n");
1671                 goto err_free_internals;
1672         }
1673 #endif
1674         internals->shared_umem = shared_umem;
1675
1676         if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1677                                   &internals->combined_queue_cnt)) {
1678                 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1679                                 if_name);
1680                 goto err_free_internals;
1681         }
1682
1683         if (queue_cnt > internals->combined_queue_cnt) {
1684                 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1685                                 queue_cnt, internals->combined_queue_cnt);
1686                 goto err_free_internals;
1687         }
1688
1689         internals->rx_queues = rte_zmalloc_socket(NULL,
1690                                         sizeof(struct pkt_rx_queue) * queue_cnt,
1691                                         0, numa_node);
1692         if (internals->rx_queues == NULL) {
1693                 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1694                 goto err_free_internals;
1695         }
1696
1697         internals->tx_queues = rte_zmalloc_socket(NULL,
1698                                         sizeof(struct pkt_tx_queue) * queue_cnt,
1699                                         0, numa_node);
1700         if (internals->tx_queues == NULL) {
1701                 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1702                 goto err_free_rx;
1703         }
1704         for (i = 0; i < queue_cnt; i++) {
1705                 internals->tx_queues[i].pair = &internals->rx_queues[i];
1706                 internals->rx_queues[i].pair = &internals->tx_queues[i];
1707                 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1708                 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1709                 internals->rx_queues[i].busy_budget = busy_budget;
1710         }
1711
1712         ret = get_iface_info(if_name, &internals->eth_addr,
1713                              &internals->if_index);
1714         if (ret)
1715                 goto err_free_tx;
1716
1717         eth_dev = rte_eth_vdev_allocate(dev, 0);
1718         if (eth_dev == NULL)
1719                 goto err_free_tx;
1720
1721         eth_dev->data->dev_private = internals;
1722         eth_dev->data->dev_link = pmd_link;
1723         eth_dev->data->mac_addrs = &internals->eth_addr;
1724         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1725         eth_dev->dev_ops = &ops;
1726         eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1727         eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1728
1729 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1730         AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1731 #endif
1732
1733         return eth_dev;
1734
1735 err_free_tx:
1736         rte_free(internals->tx_queues);
1737 err_free_rx:
1738         rte_free(internals->rx_queues);
1739 err_free_internals:
1740         rte_free(internals);
1741         return NULL;
1742 }
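
/*
 * The error labels in init_internals() unwind in reverse allocation order:
 * tx queues, then rx queues, then the internals struct. The ethdev is
 * allocated last and nothing after that allocation can fail, so these
 * paths never need rte_eth_dev_release_port().
 */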
1743
1744 static int
1745 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1746 {
1747         struct rte_kvargs *kvlist;
1748         char if_name[IFNAMSIZ] = {'\0'};
1749         int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1750         int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1751         int shared_umem = 0;
1752         char prog_path[PATH_MAX] = {'\0'};
1753         int busy_budget = -1;
1754         struct rte_eth_dev *eth_dev = NULL;
1755         const char *name;
1756
1757         AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
1758                 rte_vdev_device_name(dev));
1759
1760         name = rte_vdev_device_name(dev);
1761         if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1762                 strlen(rte_vdev_device_args(dev)) == 0) {
1763                 eth_dev = rte_eth_dev_attach_secondary(name);
1764                 if (eth_dev == NULL) {
1765                         AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1766                         return -EINVAL;
1767                 }
1768                 eth_dev->dev_ops = &ops;
1769                 rte_eth_dev_probing_finish(eth_dev);
1770                 return 0;
1771         }
1772
1773         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1774         if (kvlist == NULL) {
1775                 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1776                 return -EINVAL;
1777         }
1778
1779         if (dev->device.numa_node == SOCKET_ID_ANY)
1780                 dev->device.numa_node = rte_socket_id();
1781
1782         if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1783                              &xsk_queue_cnt, &shared_umem, prog_path,
1784                              &busy_budget) < 0) {
1785                 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1786                 return -EINVAL;
1787         }
1788
1789         if (strlen(if_name) == 0) {
1790                 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1791                 return -EINVAL;
1792         }
1793
1794         busy_budget = busy_budget == -1 ? ETH_AF_XDP_DFLT_BUSY_BUDGET :
1795                                         busy_budget;
1796
1797         eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1798                                         xsk_queue_cnt, shared_umem, prog_path,
1799                                         busy_budget);
1800         if (eth_dev == NULL) {
1801                 AF_XDP_LOG(ERR, "Failed to init internals\n");
1802                 return -1;
1803         }
1804
1805         rte_eth_dev_probing_finish(eth_dev);
1806
1807         return 0;
1808 }
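
/*
 * A secondary process that supplies no devargs attaches to the port
 * already created by the primary (the RTE_PROC_SECONDARY branch above),
 * installs the ops table and finishes probing; the kvargs parsing and
 * init_internals() path below runs only in that fall-through case.
 */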
1809
1810 static int
1811 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
1812 {
1813         struct rte_eth_dev *eth_dev = NULL;
1814
1815         AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
1816                 rte_socket_id());
1817
1818         if (dev == NULL)
1819                 return -1;
1820
1821         /* find the ethdev entry */
1822         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1823         if (eth_dev == NULL)
1824                 return 0;
1825
1826         eth_dev_close(eth_dev);
1827         rte_eth_dev_release_port(eth_dev);
1828
1830         return 0;
1831 }
1832
1833 static struct rte_vdev_driver pmd_af_xdp_drv = {
1834         .probe = rte_pmd_af_xdp_probe,
1835         .remove = rte_pmd_af_xdp_remove,
1836 };
1837
1838 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
1839 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
1840                               "iface=<string> "
1841                               "start_queue=<int> "
1842                               "queue_count=<int> "
1843                               "shared_umem=<int> "
1844                               "xdp_prog=<string> "
1845                               "busy_budget=<int>");
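
/*
 * Example invocation (illustrative; the interface name is a placeholder):
 *
 *   dpdk-testpmd -l 0-1 --no-pci \
 *       --vdev net_af_xdp,iface=ens786f0,start_queue=0,queue_count=1 \
 *       -- -i
 *
 * Only "iface" is mandatory; the remaining arguments are optional and fall
 * back to the driver defaults noted above.
 */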