drivers/net/af_xdp/rte_eth_af_xdp.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2019 Intel Corporation.
3  */
4 #include <unistd.h>
5 #include <errno.h>
6 #include <stdlib.h>
7 #include <string.h>
8 #include <poll.h>
9 #include <netinet/in.h>
10 #include <net/if.h>
11 #include <sys/socket.h>
12 #include <sys/ioctl.h>
13 #include <linux/if_ether.h>
14 #include <linux/if_xdp.h>
15 #include <linux/if_link.h>
16 #include <linux/ethtool.h>
17 #include <linux/sockios.h>
18 #include "af_xdp_deps.h"
19 #include <bpf/xsk.h>
20
21 #include <rte_ethdev.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_ethdev_vdev.h>
24 #include <rte_kvargs.h>
25 #include <rte_bus_vdev.h>
26 #include <rte_string_fns.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_dev.h>
30 #include <rte_eal.h>
31 #include <rte_ether.h>
32 #include <rte_lcore.h>
33 #include <rte_log.h>
34 #include <rte_memory.h>
35 #include <rte_memzone.h>
36 #include <rte_mempool.h>
37 #include <rte_mbuf.h>
38 #include <rte_malloc.h>
39 #include <rte_ring.h>
40
41 #ifndef SOL_XDP
42 #define SOL_XDP 283
43 #endif
44
45 #ifndef AF_XDP
46 #define AF_XDP 44
47 #endif
48
49 #ifndef PF_XDP
50 #define PF_XDP AF_XDP
51 #endif
52
53 static int af_xdp_logtype;
54
55 #define AF_XDP_LOG(level, fmt, args...)                 \
56         rte_log(RTE_LOG_ ## level, af_xdp_logtype,      \
57                 "%s(): " fmt, __func__, ##args)
58
59 #define ETH_AF_XDP_FRAME_SIZE           2048
60 #define ETH_AF_XDP_NUM_BUFFERS          4096
61 #define ETH_AF_XDP_DFLT_NUM_DESCS       XSK_RING_CONS__DEFAULT_NUM_DESCS
62 #define ETH_AF_XDP_DFLT_START_QUEUE_IDX 0
63 #define ETH_AF_XDP_DFLT_QUEUE_COUNT     1
64
65 #define ETH_AF_XDP_RX_BATCH_SIZE        32
66 #define ETH_AF_XDP_TX_BATCH_SIZE        32
67
68
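/*
 * Per-queue UMEM state.  fq/cq are the kernel fill and completion rings.
 * In zero-copy builds (XDP_UMEM_UNALIGNED_CHUNK_FLAG) the UMEM is backed by
 * an mbuf mempool (mb_pool/buffer); otherwise a dedicated memzone (mz) is
 * used together with buf_ring, which tracks the free frame addresses.
 */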
69 struct xsk_umem_info {
70         struct xsk_ring_prod fq;
71         struct xsk_ring_cons cq;
72         struct xsk_umem *umem;
73         struct rte_ring *buf_ring;
74         const struct rte_memzone *mz;
75         struct rte_mempool *mb_pool;
76         void *buffer;
77 };
78
79 struct rx_stats {
80         uint64_t rx_pkts;
81         uint64_t rx_bytes;
82         uint64_t rx_dropped;
83 };
84
85 struct pkt_rx_queue {
86         struct xsk_ring_cons rx;
87         struct xsk_umem_info *umem;
88         struct xsk_socket *xsk;
89         struct rte_mempool *mb_pool;
90
91         struct rx_stats stats;
92
93         struct pkt_tx_queue *pair;
94         struct pollfd fds[1];
95         int xsk_queue_idx;
96 };
97
98 struct tx_stats {
99         uint64_t tx_pkts;
100         uint64_t tx_bytes;
101         uint64_t tx_dropped;
102 };
103
104 struct pkt_tx_queue {
105         struct xsk_ring_prod tx;
106         struct xsk_umem_info *umem;
107
108         struct tx_stats stats;
109
110         struct pkt_rx_queue *pair;
111         int xsk_queue_idx;
112 };
113
114 struct pmd_internals {
115         int if_index;
116         char if_name[IFNAMSIZ];
117         int start_queue_idx;
118         int queue_cnt;
119         int max_queue_cnt;
120         int combined_queue_cnt;
121
122         struct rte_ether_addr eth_addr;
123
124         struct pkt_rx_queue *rx_queues;
125         struct pkt_tx_queue *tx_queues;
126 };
127
128 #define ETH_AF_XDP_IFACE_ARG                    "iface"
129 #define ETH_AF_XDP_START_QUEUE_ARG              "start_queue"
130 #define ETH_AF_XDP_QUEUE_COUNT_ARG              "queue_count"
131
132 static const char * const valid_arguments[] = {
133         ETH_AF_XDP_IFACE_ARG,
134         ETH_AF_XDP_START_QUEUE_ARG,
135         ETH_AF_XDP_QUEUE_COUNT_ARG,
136         NULL
137 };
138
139 static const struct rte_eth_link pmd_link = {
140         .link_speed = ETH_SPEED_NUM_10G,
141         .link_duplex = ETH_LINK_FULL_DUPLEX,
142         .link_status = ETH_LINK_DOWN,
143         .link_autoneg = ETH_LINK_AUTONEG
144 };
145
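/*
 * Two datapaths are compiled depending on kernel/libbpf support: when
 * XDP_UMEM_UNALIGNED_CHUNK_FLAG is available, mbufs live directly inside the
 * UMEM so rx/tx need no copy (the "zc" variants below); otherwise packet
 * data is copied between UMEM frames and separately allocated mbufs
 * (the "cp" variants).
 */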
146 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
147 static inline int
148 reserve_fill_queue_zc(struct xsk_umem_info *umem, uint16_t reserve_size,
149                       struct rte_mbuf **bufs)
150 {
151         struct xsk_ring_prod *fq = &umem->fq;
152         uint32_t idx;
153         uint16_t i;
154
155         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
156                 for (i = 0; i < reserve_size; i++)
157                         rte_pktmbuf_free(bufs[i]);
158                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
159                 return -1;
160         }
161
162         for (i = 0; i < reserve_size; i++) {
163                 __u64 *fq_addr;
164                 uint64_t addr;
165
166                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
167                 addr = (uint64_t)bufs[i] - (uint64_t)umem->buffer -
168                                 umem->mb_pool->header_size;
169                 *fq_addr = addr;
170         }
171
172         xsk_ring_prod__submit(fq, reserve_size);
173
174         return 0;
175 }
176 #else
177 static inline int
178 reserve_fill_queue_cp(struct xsk_umem_info *umem, uint16_t reserve_size,
179                       struct rte_mbuf **bufs __rte_unused)
180 {
181         struct xsk_ring_prod *fq = &umem->fq;
182         void *addrs[reserve_size];
183         uint32_t idx;
184         uint16_t i;
185
186         if (rte_ring_dequeue_bulk(umem->buf_ring, addrs, reserve_size, NULL)
187                     != reserve_size) {
188                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
189                 return -1;
190         }
191
192         if (unlikely(!xsk_ring_prod__reserve(fq, reserve_size, &idx))) {
193                 AF_XDP_LOG(DEBUG, "Failed to reserve enough fq descs.\n");
194                 rte_ring_enqueue_bulk(umem->buf_ring, addrs,
195                                 reserve_size, NULL);
196                 return -1;
197         }
198
199         for (i = 0; i < reserve_size; i++) {
200                 __u64 *fq_addr;
201
202                 fq_addr = xsk_ring_prod__fill_addr(fq, idx++);
203                 *fq_addr = (uint64_t)addrs[i];
204         }
205
206         xsk_ring_prod__submit(fq, reserve_size);
207
208         return 0;
209 }
210 #endif
211
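/* Replenish the fill queue using whichever variant was compiled in. */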
212 static inline int
213 reserve_fill_queue(struct xsk_umem_info *umem, uint16_t reserve_size,
214                    struct rte_mbuf **bufs)
215 {
216 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
217         return reserve_fill_queue_zc(umem, reserve_size, bufs);
218 #else
219         return reserve_fill_queue_cp(umem, reserve_size, bufs);
220 #endif
221 }
222
223 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
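/*
 * Zero-copy receive: each completed rx descriptor already points into an
 * mbuf carved out of the UMEM, so the mbuf header is recovered from the
 * descriptor address and only data_off and the lengths need fixing up.
 * Freshly allocated mbufs are handed back to the fill queue afterwards.
 */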
224 static uint16_t
225 af_xdp_rx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
226 {
227         struct pkt_rx_queue *rxq = queue;
228         struct xsk_ring_cons *rx = &rxq->rx;
229         struct xsk_umem_info *umem = rxq->umem;
230         uint32_t idx_rx = 0;
231         unsigned long rx_bytes = 0;
232         int rcvd, i;
233         struct rte_mbuf *fq_bufs[ETH_AF_XDP_RX_BATCH_SIZE];
234
235         /* allocate bufs for fill queue replenishment after rx */
236         if (rte_pktmbuf_alloc_bulk(umem->mb_pool, fq_bufs, nb_pkts)) {
237                 AF_XDP_LOG(DEBUG,
238                         "Failed to get enough buffers for fq.\n");
239                 return 0;
240         }
241
242         rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
243
244         if (rcvd == 0) {
245 #if defined(XDP_USE_NEED_WAKEUP)
246                 if (xsk_ring_prod__needs_wakeup(&umem->fq))
247                         (void)poll(rxq->fds, 1, 1000);
248 #endif
249
250                 goto out;
251         }
252
253         for (i = 0; i < rcvd; i++) {
254                 const struct xdp_desc *desc;
255                 uint64_t addr;
256                 uint32_t len;
257                 uint64_t offset;
258
259                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
260                 addr = desc->addr;
261                 len = desc->len;
262
263                 offset = xsk_umem__extract_offset(addr);
264                 addr = xsk_umem__extract_addr(addr);
265
266                 bufs[i] = (struct rte_mbuf *)
267                                 xsk_umem__get_data(umem->buffer, addr +
268                                         umem->mb_pool->header_size);
269                 bufs[i]->data_off = offset - sizeof(struct rte_mbuf) -
270                         rte_pktmbuf_priv_size(umem->mb_pool) -
271                         umem->mb_pool->header_size;
272
273                 rte_pktmbuf_pkt_len(bufs[i]) = len;
274                 rte_pktmbuf_data_len(bufs[i]) = len;
275                 rx_bytes += len;
276         }
277
278         xsk_ring_cons__release(rx, rcvd);
279
280         (void)reserve_fill_queue(umem, rcvd, fq_bufs);
281
282         /* statistics */
283         rxq->stats.rx_pkts += rcvd;
284         rxq->stats.rx_bytes += rx_bytes;
285
286 out:
287         if (rcvd != nb_pkts)
288                 rte_mempool_put_bulk(umem->mb_pool, (void **)&fq_bufs[rcvd],
289                                      nb_pkts - rcvd);
290
291         return rcvd;
292 }
293 #else
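/*
 * Copy-mode receive: packet data is copied from the UMEM frame into a newly
 * allocated mbuf and the frame address is returned to buf_ring for reuse.
 */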
294 static uint16_t
295 af_xdp_rx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
296 {
297         struct pkt_rx_queue *rxq = queue;
298         struct xsk_ring_cons *rx = &rxq->rx;
299         struct xsk_umem_info *umem = rxq->umem;
300         struct xsk_ring_prod *fq = &umem->fq;
301         uint32_t idx_rx = 0;
302         unsigned long rx_bytes = 0;
303         int rcvd, i;
304         uint32_t free_thresh = fq->size >> 1;
305         struct rte_mbuf *mbufs[ETH_AF_XDP_RX_BATCH_SIZE];
306
307         if (unlikely(rte_pktmbuf_alloc_bulk(rxq->mb_pool, mbufs, nb_pkts) != 0))
308                 return 0;
309
310         rcvd = xsk_ring_cons__peek(rx, nb_pkts, &idx_rx);
311         if (rcvd == 0) {
312 #if defined(XDP_USE_NEED_WAKEUP)
313                 if (xsk_ring_prod__needs_wakeup(fq))
314                         (void)poll(rxq->fds, 1, 1000);
315 #endif
316
317                 goto out;
318         }
319
320         if (xsk_prod_nb_free(fq, free_thresh) >= free_thresh)
321                 (void)reserve_fill_queue(umem, ETH_AF_XDP_RX_BATCH_SIZE, NULL);
322
323         for (i = 0; i < rcvd; i++) {
324                 const struct xdp_desc *desc;
325                 uint64_t addr;
326                 uint32_t len;
327                 void *pkt;
328
329                 desc = xsk_ring_cons__rx_desc(rx, idx_rx++);
330                 addr = desc->addr;
331                 len = desc->len;
332                 pkt = xsk_umem__get_data(rxq->umem->mz->addr, addr);
333
334                 rte_memcpy(rte_pktmbuf_mtod(mbufs[i], void *), pkt, len);
335                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
336                 rte_pktmbuf_pkt_len(mbufs[i]) = len;
337                 rte_pktmbuf_data_len(mbufs[i]) = len;
338                 rx_bytes += len;
339                 bufs[i] = mbufs[i];
340         }
341
342         xsk_ring_cons__release(rx, rcvd);
343
344         /* statistics */
345         rxq->stats.rx_pkts += rcvd;
346         rxq->stats.rx_bytes += rx_bytes;
347
348 out:
349         if (rcvd != nb_pkts)
350                 rte_mempool_put_bulk(rxq->mb_pool, (void **)&mbufs[rcvd],
351                                      nb_pkts - rcvd);
352
353         return rcvd;
354 }
355 #endif
356
357 static uint16_t
358 eth_af_xdp_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
359 {
360         nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_RX_BATCH_SIZE);
361
362 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
363         return af_xdp_rx_zc(queue, bufs, nb_pkts);
364 #else
365         return af_xdp_rx_cp(queue, bufs, nb_pkts);
366 #endif
367 }
368
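/*
 * Drain up to 'size' entries from the completion queue, returning the
 * transmitted buffers to their owner (mbuf free in zero-copy mode,
 * buf_ring in copy mode).
 */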
369 static void
370 pull_umem_cq(struct xsk_umem_info *umem, int size)
371 {
372         struct xsk_ring_cons *cq = &umem->cq;
373         size_t i, n;
374         uint32_t idx_cq = 0;
375
376         n = xsk_ring_cons__peek(cq, size, &idx_cq);
377
378         for (i = 0; i < n; i++) {
379                 uint64_t addr;
380                 addr = *xsk_ring_cons__comp_addr(cq, idx_cq++);
381 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
382                 addr = xsk_umem__extract_addr(addr);
383                 rte_pktmbuf_free((struct rte_mbuf *)
384                                         xsk_umem__get_data(umem->buffer,
385                                         addr + umem->mb_pool->header_size));
386 #else
387                 rte_ring_enqueue(umem->buf_ring, (void *)addr);
388 #endif
389         }
390
391         xsk_ring_cons__release(cq, n);
392 }
393
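/*
 * Ask the kernel to start transmitting.  With XDP_USE_NEED_WAKEUP the
 * syscall is issued only when the kernel requests a wakeup; EAGAIN is
 * handled by draining the completion queue and retrying.
 */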
394 static void
395 kick_tx(struct pkt_tx_queue *txq)
396 {
397         struct xsk_umem_info *umem = txq->umem;
398
399 #if defined(XDP_USE_NEED_WAKEUP)
400         if (xsk_ring_prod__needs_wakeup(&txq->tx))
401 #endif
402                 while (send(xsk_socket__fd(txq->pair->xsk), NULL,
403                             0, MSG_DONTWAIT) < 0) {
404                         /* something unexpected */
405                         if (errno != EBUSY && errno != EAGAIN && errno != EINTR)
406                                 break;
407
408                         /* pull from completion queue to leave more space */
409                         if (errno == EAGAIN)
410                                 pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
411                 }
412 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
413         pull_umem_cq(umem, ETH_AF_XDP_TX_BATCH_SIZE);
414 #endif
415 }
416
417 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
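/*
 * Zero-copy transmit: mbufs that come from the UMEM mempool are described
 * in place; mbufs from any other pool are first copied into a local UMEM
 * mbuf.  Completed descriptors are reclaimed via the completion queue.
 */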
418 static uint16_t
419 af_xdp_tx_zc(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
420 {
421         struct pkt_tx_queue *txq = queue;
422         struct xsk_umem_info *umem = txq->umem;
423         struct rte_mbuf *mbuf;
424         unsigned long tx_bytes = 0;
425         int i;
426         uint32_t idx_tx;
427         uint16_t count = 0;
428         struct xdp_desc *desc;
429         uint64_t addr, offset;
430
431         pull_umem_cq(umem, nb_pkts);
432
433         for (i = 0; i < nb_pkts; i++) {
434                 mbuf = bufs[i];
435
436                 if (mbuf->pool == umem->mb_pool) {
437                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
438                                 kick_tx(txq);
439                                 goto out;
440                         }
441                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
442                         desc->len = mbuf->pkt_len;
443                         addr = (uint64_t)mbuf - (uint64_t)umem->buffer -
444                                         umem->mb_pool->header_size;
445                         offset = rte_pktmbuf_mtod(mbuf, uint64_t) -
446                                         (uint64_t)mbuf +
447                                         umem->mb_pool->header_size;
448                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
449                         desc->addr = addr | offset;
450                         count++;
451                 } else {
452                         struct rte_mbuf *local_mbuf =
453                                         rte_pktmbuf_alloc(umem->mb_pool);
454                         void *pkt;
455
456                         if (local_mbuf == NULL)
457                                 goto out;
458
459                         if (!xsk_ring_prod__reserve(&txq->tx, 1, &idx_tx)) {
460                                 rte_pktmbuf_free(local_mbuf);
461                                 kick_tx(txq);
462                                 goto out;
463                         }
464
465                         desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx);
466                         desc->len = mbuf->pkt_len;
467
468                         addr = (uint64_t)local_mbuf - (uint64_t)umem->buffer -
469                                         umem->mb_pool->header_size;
470                         offset = rte_pktmbuf_mtod(local_mbuf, uint64_t) -
471                                         (uint64_t)local_mbuf +
472                                         umem->mb_pool->header_size;
473                         pkt = xsk_umem__get_data(umem->buffer, addr + offset);
474                         offset = offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
475                         desc->addr = addr | offset;
476                         rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *),
477                                         desc->len);
478                         rte_pktmbuf_free(mbuf);
479                         count++;
480                 }
481
482                 tx_bytes += desc->len; /* mbuf may already be freed above */
483         }
484
485         kick_tx(txq);
486
487 out:
488         xsk_ring_prod__submit(&txq->tx, count);
489
490         txq->stats.tx_pkts += count;
491         txq->stats.tx_bytes += tx_bytes;
492         txq->stats.tx_dropped += nb_pkts - count;
493
494         return count;
495 }
496 #else
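/*
 * Copy-mode transmit: free UMEM frames are taken from buf_ring, packet data
 * is copied into them and the frames are posted to the tx ring.
 */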
497 static uint16_t
498 af_xdp_tx_cp(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
499 {
500         struct pkt_tx_queue *txq = queue;
501         struct xsk_umem_info *umem = txq->umem;
502         struct rte_mbuf *mbuf;
503         void *addrs[ETH_AF_XDP_TX_BATCH_SIZE];
504         unsigned long tx_bytes = 0;
505         int i;
506         uint32_t idx_tx;
507
508         nb_pkts = RTE_MIN(nb_pkts, ETH_AF_XDP_TX_BATCH_SIZE);
509
510         pull_umem_cq(umem, nb_pkts);
511
512         nb_pkts = rte_ring_dequeue_bulk(umem->buf_ring, addrs,
513                                         nb_pkts, NULL);
514         if (nb_pkts == 0)
515                 return 0;
516
517         if (xsk_ring_prod__reserve(&txq->tx, nb_pkts, &idx_tx) != nb_pkts) {
518                 kick_tx(txq);
519                 rte_ring_enqueue_bulk(umem->buf_ring, addrs, nb_pkts, NULL);
520                 return 0;
521         }
522
523         for (i = 0; i < nb_pkts; i++) {
524                 struct xdp_desc *desc;
525                 void *pkt;
526
527                 desc = xsk_ring_prod__tx_desc(&txq->tx, idx_tx + i);
528                 mbuf = bufs[i];
529                 desc->len = mbuf->pkt_len;
530
531                 desc->addr = (uint64_t)addrs[i];
532                 pkt = xsk_umem__get_data(umem->mz->addr,
533                                          desc->addr);
534                 rte_memcpy(pkt, rte_pktmbuf_mtod(mbuf, void *), desc->len);
535                 tx_bytes += mbuf->pkt_len;
536                 rte_pktmbuf_free(mbuf);
537         }
538
539         xsk_ring_prod__submit(&txq->tx, nb_pkts);
540
541         kick_tx(txq);
542
543         txq->stats.tx_pkts += nb_pkts;
544         txq->stats.tx_bytes += tx_bytes;
545
546         return nb_pkts;
547 }
548 #endif
549
550 static uint16_t
551 eth_af_xdp_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
552 {
553 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
554         return af_xdp_tx_zc(queue, bufs, nb_pkts);
555 #else
556         return af_xdp_tx_cp(queue, bufs, nb_pkts);
557 #endif
558 }
559
560 static int
561 eth_dev_start(struct rte_eth_dev *dev)
562 {
563         dev->data->dev_link.link_status = ETH_LINK_UP;
564
565         return 0;
566 }
567
568 /* This function gets called when the current port gets stopped. */
569 static void
570 eth_dev_stop(struct rte_eth_dev *dev)
571 {
572         dev->data->dev_link.link_status = ETH_LINK_DOWN;
573 }
574
575 static int
576 eth_dev_configure(struct rte_eth_dev *dev)
577 {
578         /* rx/tx must be paired */
579         if (dev->data->nb_rx_queues != dev->data->nb_tx_queues)
580                 return -EINVAL;
581
582         return 0;
583 }
584
585 static int
586 eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
587 {
588         struct pmd_internals *internals = dev->data->dev_private;
589
590         dev_info->if_index = internals->if_index;
591         dev_info->max_mac_addrs = 1;
592         dev_info->max_rx_pktlen = ETH_FRAME_LEN;
593         dev_info->max_rx_queues = internals->queue_cnt;
594         dev_info->max_tx_queues = internals->queue_cnt;
595
596         dev_info->min_mtu = RTE_ETHER_MIN_MTU;
597 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
598         dev_info->max_mtu = getpagesize() -
599                                 sizeof(struct rte_mempool_objhdr) -
600                                 sizeof(struct rte_mbuf) -
601                                 RTE_PKTMBUF_HEADROOM - XDP_PACKET_HEADROOM;
602 #else
603         dev_info->max_mtu = ETH_AF_XDP_FRAME_SIZE - XDP_PACKET_HEADROOM;
604 #endif
605
606         dev_info->default_rxportconf.nb_queues = 1;
607         dev_info->default_txportconf.nb_queues = 1;
608         dev_info->default_rxportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
609         dev_info->default_txportconf.ring_size = ETH_AF_XDP_DFLT_NUM_DESCS;
610
611         return 0;
612 }
613
614 static int
615 eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
616 {
617         struct pmd_internals *internals = dev->data->dev_private;
618         struct xdp_statistics xdp_stats;
619         struct pkt_rx_queue *rxq;
620         struct pkt_tx_queue *txq;
621         socklen_t optlen;
622         int i, ret;
623
624         for (i = 0; i < dev->data->nb_rx_queues; i++) {
625                 optlen = sizeof(struct xdp_statistics);
626                 rxq = &internals->rx_queues[i];
627                 txq = rxq->pair;
628                 stats->q_ipackets[i] = rxq->stats.rx_pkts;
629                 stats->q_ibytes[i] = rxq->stats.rx_bytes;
630
631                 stats->q_opackets[i] = txq->stats.tx_pkts;
632                 stats->q_obytes[i] = txq->stats.tx_bytes;
633
634                 stats->ipackets += stats->q_ipackets[i];
635                 stats->ibytes += stats->q_ibytes[i];
636                 stats->imissed += rxq->stats.rx_dropped;
637                 stats->oerrors += txq->stats.tx_dropped;
638                 ret = getsockopt(xsk_socket__fd(rxq->xsk), SOL_XDP,
639                                 XDP_STATISTICS, &xdp_stats, &optlen);
640                 if (ret != 0) {
641                         AF_XDP_LOG(ERR, "getsockopt() failed for XDP_STATISTICS.\n");
642                         return -1;
643                 }
644                 stats->imissed += xdp_stats.rx_dropped;
645
646                 stats->opackets += stats->q_opackets[i];
647                 stats->obytes += stats->q_obytes[i];
648         }
649
650         return 0;
651 }
652
653 static int
654 eth_stats_reset(struct rte_eth_dev *dev)
655 {
656         struct pmd_internals *internals = dev->data->dev_private;
657         int i;
658
659         for (i = 0; i < internals->queue_cnt; i++) {
660                 memset(&internals->rx_queues[i].stats, 0,
661                                         sizeof(struct rx_stats));
662                 memset(&internals->tx_queues[i].stats, 0,
663                                         sizeof(struct tx_stats));
664         }
665
666         return 0;
667 }
668
669 static void
670 remove_xdp_program(struct pmd_internals *internals)
671 {
672         uint32_t curr_prog_id = 0;
673
674         if (bpf_get_link_xdp_id(internals->if_index, &curr_prog_id,
675                                 XDP_FLAGS_UPDATE_IF_NOEXIST)) {
676                 AF_XDP_LOG(ERR, "bpf_get_link_xdp_id failed\n");
677                 return;
678         }
679         bpf_set_link_xdp_fd(internals->if_index, -1,
680                         XDP_FLAGS_UPDATE_IF_NOEXIST);
681 }
682
683 static void
684 xdp_umem_destroy(struct xsk_umem_info *umem)
685 {
686 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
687         rte_mempool_free(umem->mb_pool);
688         umem->mb_pool = NULL;
689 #else
690         rte_memzone_free(umem->mz);
691         umem->mz = NULL;
692
693         rte_ring_free(umem->buf_ring);
694         umem->buf_ring = NULL;
695 #endif
696
697         rte_free(umem);
698         umem = NULL;
699 }
700
701 static void
702 eth_dev_close(struct rte_eth_dev *dev)
703 {
704         struct pmd_internals *internals = dev->data->dev_private;
705         struct pkt_rx_queue *rxq;
706         int i;
707
708         AF_XDP_LOG(INFO, "Closing AF_XDP ethdev on numa socket %u\n",
709                 rte_socket_id());
710
711         for (i = 0; i < internals->queue_cnt; i++) {
712                 rxq = &internals->rx_queues[i];
713                 if (rxq->umem == NULL)
714                         break;
715                 xsk_socket__delete(rxq->xsk);
716                 (void)xsk_umem__delete(rxq->umem->umem);
717                 xdp_umem_destroy(rxq->umem);
718         }
719 
720         /* The queue arrays were allocated as single blocks; free them once. */
721         rte_free(internals->rx_queues);
722         rte_free(internals->tx_queues);
723
724         /*
725          * MAC is not allocated dynamically; set it to NULL to prevent
726          * rte_eth_dev_release_port() from attempting to free it.
727          */
728         dev->data->mac_addrs = NULL;
729
730         remove_xdp_program(internals);
731 }
732
733 static void
734 eth_queue_release(void *q __rte_unused)
735 {
736 }
737
738 static int
739 eth_link_update(struct rte_eth_dev *dev __rte_unused,
740                 int wait_to_complete __rte_unused)
741 {
742         return 0;
743 }
744
745 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
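/*
 * The UMEM must start on a page boundary; take the mempool's first memory
 * chunk and round its address down to the page size.
 */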
746 static inline uint64_t get_base_addr(struct rte_mempool *mp)
747 {
748         struct rte_mempool_memhdr *memhdr;
749
750         memhdr = STAILQ_FIRST(&mp->mem_list);
751         return (uint64_t)memhdr->addr & ~(getpagesize() - 1);
752 }
753
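/*
 * Build a UMEM directly on top of the rx queue's mbuf mempool: the frame
 * size is the mempool object size and the headroom covers the mempool
 * header, the mbuf structure, its private area and RTE_PKTMBUF_HEADROOM.
 */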
754 static struct
755 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals __rte_unused,
756                                   struct pkt_rx_queue *rxq)
757 {
758         struct xsk_umem_info *umem;
759         int ret;
760         struct xsk_umem_config usr_config = {
761                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
762                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
763                 .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG};
764         void *base_addr = NULL;
765         struct rte_mempool *mb_pool = rxq->mb_pool;
766
767         usr_config.frame_size = rte_mempool_calc_obj_size(mb_pool->elt_size,
768                                                                 mb_pool->flags,
769                                                                 NULL);
770         usr_config.frame_headroom = mb_pool->header_size +
771                                         sizeof(struct rte_mbuf) +
772                                         rte_pktmbuf_priv_size(mb_pool) +
773                                         RTE_PKTMBUF_HEADROOM;
774
775         umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
776         if (umem == NULL) {
777                 AF_XDP_LOG(ERR, "Failed to allocate umem info");
778                 return NULL;
779         }
780
781         umem->mb_pool = mb_pool;
782         base_addr = (void *)get_base_addr(mb_pool);
783
784         ret = xsk_umem__create(&umem->umem, base_addr,
785                                mb_pool->populated_size * usr_config.frame_size,
786                                &umem->fq, &umem->cq,
787                                &usr_config);
788
789         if (ret) {
790                 AF_XDP_LOG(ERR, "Failed to create umem");
791                 goto err;
792         }
793         umem->buffer = base_addr;
794
795 #else
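/*
 * Copy mode: reserve an IOVA-contiguous memzone for the UMEM and seed
 * buf_ring with the frame offsets that index into it.
 */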
796 static struct
797 xsk_umem_info *xdp_umem_configure(struct pmd_internals *internals,
798                                   struct pkt_rx_queue *rxq)
799 {
800         struct xsk_umem_info *umem;
801         const struct rte_memzone *mz;
802         struct xsk_umem_config usr_config = {
803                 .fill_size = ETH_AF_XDP_DFLT_NUM_DESCS,
804                 .comp_size = ETH_AF_XDP_DFLT_NUM_DESCS,
805                 .frame_size = ETH_AF_XDP_FRAME_SIZE,
806                 .frame_headroom = 0 };
807         char ring_name[RTE_RING_NAMESIZE];
808         char mz_name[RTE_MEMZONE_NAMESIZE];
809         int ret;
810         uint64_t i;
811
812         umem = rte_zmalloc_socket("umem", sizeof(*umem), 0, rte_socket_id());
813         if (umem == NULL) {
814                 AF_XDP_LOG(ERR, "Failed to allocate umem info");
815                 return NULL;
816         }
817
818         snprintf(ring_name, sizeof(ring_name), "af_xdp_ring_%s_%u",
819                        internals->if_name, rxq->xsk_queue_idx);
820         umem->buf_ring = rte_ring_create(ring_name,
821                                          ETH_AF_XDP_NUM_BUFFERS,
822                                          rte_socket_id(),
823                                          RING_F_SP_ENQ | RING_F_SC_DEQ);
824         if (umem->buf_ring == NULL) {
825                 AF_XDP_LOG(ERR, "Failed to create rte_ring\n");
826                 goto err;
827         }
828
829         for (i = 0; i < ETH_AF_XDP_NUM_BUFFERS; i++)
830                 rte_ring_enqueue(umem->buf_ring,
831                                  (void *)(i * ETH_AF_XDP_FRAME_SIZE));
832
833         snprintf(mz_name, sizeof(mz_name), "af_xdp_umem_%s_%u",
834                        internals->if_name, rxq->xsk_queue_idx);
835         mz = rte_memzone_reserve_aligned(mz_name,
836                         ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
837                         rte_socket_id(), RTE_MEMZONE_IOVA_CONTIG,
838                         getpagesize());
839         if (mz == NULL) {
840                 AF_XDP_LOG(ERR, "Failed to reserve memzone for af_xdp umem.\n");
841                 goto err;
842         }
843
844         ret = xsk_umem__create(&umem->umem, mz->addr,
845                                ETH_AF_XDP_NUM_BUFFERS * ETH_AF_XDP_FRAME_SIZE,
846                                &umem->fq, &umem->cq,
847                                &usr_config);
848
849         if (ret) {
850                 AF_XDP_LOG(ERR, "Failed to create umem");
851                 goto err;
852         }
853         umem->mz = mz;
854
855 #endif
856         return umem;
857
858 err:
859         xdp_umem_destroy(umem);
860         return NULL;
861 }
862
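/*
 * Create the UMEM and the AF_XDP socket for one queue pair, then prime the
 * fill queue with half of the default descriptor count.
 */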
863 static int
864 xsk_configure(struct pmd_internals *internals, struct pkt_rx_queue *rxq,
865               int ring_size)
866 {
867         struct xsk_socket_config cfg;
868         struct pkt_tx_queue *txq = rxq->pair;
869         int ret = 0;
870         int reserve_size = ETH_AF_XDP_DFLT_NUM_DESCS / 2;
871         struct rte_mbuf *fq_bufs[reserve_size];
872
873         rxq->umem = xdp_umem_configure(internals, rxq);
874         if (rxq->umem == NULL)
875                 return -ENOMEM;
876         txq->umem = rxq->umem;
877
878         cfg.rx_size = ring_size;
879         cfg.tx_size = ring_size;
880         cfg.libbpf_flags = 0;
881         cfg.xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
882         cfg.bind_flags = 0;
883
884 #if defined(XDP_USE_NEED_WAKEUP)
885         cfg.bind_flags |= XDP_USE_NEED_WAKEUP;
886 #endif
887
888         ret = xsk_socket__create(&rxq->xsk, internals->if_name,
889                         rxq->xsk_queue_idx, rxq->umem->umem, &rxq->rx,
890                         &txq->tx, &cfg);
891         if (ret) {
892                 AF_XDP_LOG(ERR, "Failed to create xsk socket.\n");
893                 goto err;
894         }
895
896 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
897         if (rte_pktmbuf_alloc_bulk(rxq->umem->mb_pool, fq_bufs, reserve_size)) {
898                 AF_XDP_LOG(DEBUG, "Failed to get enough buffers for fq.\n");
899                 goto err;
900         }
901 #endif
902         ret = reserve_fill_queue(rxq->umem, reserve_size, fq_bufs);
903         if (ret) {
904                 xsk_socket__delete(rxq->xsk);
905                 AF_XDP_LOG(ERR, "Failed to reserve fill queue.\n");
906                 goto err;
907         }
908
909         return 0;
910
911 err:
912         xdp_umem_destroy(rxq->umem);
913
914         return ret;
915 }
916
917 static int
918 eth_rx_queue_setup(struct rte_eth_dev *dev,
919                    uint16_t rx_queue_id,
920                    uint16_t nb_rx_desc,
921                    unsigned int socket_id __rte_unused,
922                    const struct rte_eth_rxconf *rx_conf __rte_unused,
923                    struct rte_mempool *mb_pool)
924 {
925         struct pmd_internals *internals = dev->data->dev_private;
926         struct pkt_rx_queue *rxq;
927         int ret;
928
929         rxq = &internals->rx_queues[rx_queue_id];
930
931         AF_XDP_LOG(INFO, "Set up rx queue, rx queue id: %d, xsk queue id: %d\n",
932                    rx_queue_id, rxq->xsk_queue_idx);
933
934 #ifndef XDP_UMEM_UNALIGNED_CHUNK_FLAG
935         uint32_t buf_size, data_size;
936
937         /* Now get the space available for data in the mbuf */
938         buf_size = rte_pktmbuf_data_room_size(mb_pool) -
939                 RTE_PKTMBUF_HEADROOM;
940         data_size = ETH_AF_XDP_FRAME_SIZE;
941
942         if (data_size > buf_size) {
943                 AF_XDP_LOG(ERR, "%s: %d bytes will not fit in mbuf (%d bytes)\n",
944                         dev->device->name, data_size, buf_size);
945                 ret = -ENOMEM;
946                 goto err;
947         }
948 #endif
949
950         rxq->mb_pool = mb_pool;
951
952         if (xsk_configure(internals, rxq, nb_rx_desc)) {
953                 AF_XDP_LOG(ERR, "Failed to configure xdp socket\n");
954                 ret = -EINVAL;
955                 goto err;
956         }
957
958         rxq->fds[0].fd = xsk_socket__fd(rxq->xsk);
959         rxq->fds[0].events = POLLIN;
960
961         dev->data->rx_queues[rx_queue_id] = rxq;
962         return 0;
963
964 err:
965         return ret;
966 }
967
968 static int
969 eth_tx_queue_setup(struct rte_eth_dev *dev,
970                    uint16_t tx_queue_id,
971                    uint16_t nb_tx_desc __rte_unused,
972                    unsigned int socket_id __rte_unused,
973                    const struct rte_eth_txconf *tx_conf __rte_unused)
974 {
975         struct pmd_internals *internals = dev->data->dev_private;
976         struct pkt_tx_queue *txq;
977
978         txq = &internals->tx_queues[tx_queue_id];
979
980         dev->data->tx_queues[tx_queue_id] = txq;
981         return 0;
982 }
983
984 static int
985 eth_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
986 {
987         struct pmd_internals *internals = dev->data->dev_private;
988         struct ifreq ifr = { .ifr_mtu = mtu };
989         int ret;
990         int s;
991
992         s = socket(PF_INET, SOCK_DGRAM, 0);
993         if (s < 0)
994                 return -EINVAL;
995
996         strlcpy(ifr.ifr_name, internals->if_name, IFNAMSIZ);
997         ret = ioctl(s, SIOCSIFMTU, &ifr);
998         close(s);
999
1000         return (ret < 0) ? -errno : 0;
1001 }
1002
1003 static int
1004 eth_dev_change_flags(char *if_name, uint32_t flags, uint32_t mask)
1005 {
1006         struct ifreq ifr;
1007         int ret = 0;
1008         int s;
1009
1010         s = socket(PF_INET, SOCK_DGRAM, 0);
1011         if (s < 0)
1012                 return -errno;
1013
1014         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1015         if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
1016                 ret = -errno;
1017                 goto out;
1018         }
1019         ifr.ifr_flags &= mask;
1020         ifr.ifr_flags |= flags;
1021         if (ioctl(s, SIOCSIFFLAGS, &ifr) < 0) {
1022                 ret = -errno;
1023                 goto out;
1024         }
1025 out:
1026         close(s);
1027         return ret;
1028 }
1029
1030 static int
1031 eth_dev_promiscuous_enable(struct rte_eth_dev *dev)
1032 {
1033         struct pmd_internals *internals = dev->data->dev_private;
1034
1035         return eth_dev_change_flags(internals->if_name, IFF_PROMISC, ~0);
1036 }
1037
1038 static int
1039 eth_dev_promiscuous_disable(struct rte_eth_dev *dev)
1040 {
1041         struct pmd_internals *internals = dev->data->dev_private;
1042
1043         return eth_dev_change_flags(internals->if_name, 0, ~IFF_PROMISC);
1044 }
1045
1046 static const struct eth_dev_ops ops = {
1047         .dev_start = eth_dev_start,
1048         .dev_stop = eth_dev_stop,
1049         .dev_close = eth_dev_close,
1050         .dev_configure = eth_dev_configure,
1051         .dev_infos_get = eth_dev_info,
1052         .mtu_set = eth_dev_mtu_set,
1053         .promiscuous_enable = eth_dev_promiscuous_enable,
1054         .promiscuous_disable = eth_dev_promiscuous_disable,
1055         .rx_queue_setup = eth_rx_queue_setup,
1056         .tx_queue_setup = eth_tx_queue_setup,
1057         .rx_queue_release = eth_queue_release,
1058         .tx_queue_release = eth_queue_release,
1059         .link_update = eth_link_update,
1060         .stats_get = eth_stats_get,
1061         .stats_reset = eth_stats_reset,
1062 };
1063
1064 /** parse integer argument */
1065 static int
1066 parse_integer_arg(const char *key __rte_unused,
1067                   const char *value, void *extra_args)
1068 {
1069         int *i = (int *)extra_args;
1070         char *end;
1071
1072         *i = strtol(value, &end, 10);
1073         if (*i < 0) {
1074                 AF_XDP_LOG(ERR, "Argument must not be negative.\n");
1075                 return -EINVAL;
1076         }
1077
1078         return 0;
1079 }
1080
1081 /** parse name argument */
1082 static int
1083 parse_name_arg(const char *key __rte_unused,
1084                const char *value, void *extra_args)
1085 {
1086         char *name = extra_args;
1087
1088         if (strnlen(value, IFNAMSIZ) > IFNAMSIZ - 1) {
1089                 AF_XDP_LOG(ERR, "Invalid name %s, should be less than %u bytes.\n",
1090                            value, IFNAMSIZ);
1091                 return -EINVAL;
1092         }
1093
1094         strlcpy(name, value, IFNAMSIZ);
1095
1096         return 0;
1097 }
1098
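/*
 * Query the interface's channel configuration via ETHTOOL_GCHANNELS so the
 * requested queue count can be validated against what the device exposes.
 */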
1099 static int
1100 xdp_get_channels_info(const char *if_name, int *max_queues,
1101                                 int *combined_queues)
1102 {
1103         struct ethtool_channels channels;
1104         struct ifreq ifr;
1105         int fd, ret;
1106
1107         fd = socket(AF_INET, SOCK_DGRAM, 0);
1108         if (fd < 0)
1109                 return -1;
1110
1111         channels.cmd = ETHTOOL_GCHANNELS;
1112         ifr.ifr_data = (void *)&channels;
1113         strncpy(ifr.ifr_name, if_name, IFNAMSIZ);
1114         ret = ioctl(fd, SIOCETHTOOL, &ifr);
1115         if (ret) {
1116                 if (errno == EOPNOTSUPP) {
1117                         ret = 0;
1118                 } else {
1119                         ret = -errno;
1120                         goto out;
1121                 }
1122         }
1123
1124         if (channels.max_combined == 0 || errno == EOPNOTSUPP) {
1125                 /* If the device says it has no channels, then all traffic
1126                  * is sent to a single stream, so max queues = 1.
1127                  */
1128                 *max_queues = 1;
1129                 *combined_queues = 1;
1130         } else {
1131                 *max_queues = channels.max_combined;
1132                 *combined_queues = channels.combined_count;
1133         }
1134
1135  out:
1136         close(fd);
1137         return ret;
1138 }
1139
1140 static int
1141 parse_parameters(struct rte_kvargs *kvlist, char *if_name, int *start_queue,
1142                         int *queue_cnt)
1143 {
1144         int ret;
1145
1146         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_IFACE_ARG,
1147                                  &parse_name_arg, if_name);
1148         if (ret < 0)
1149                 goto free_kvlist;
1150
1151         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_START_QUEUE_ARG,
1152                                  &parse_integer_arg, start_queue);
1153         if (ret < 0)
1154                 goto free_kvlist;
1155
1156         ret = rte_kvargs_process(kvlist, ETH_AF_XDP_QUEUE_COUNT_ARG,
1157                                  &parse_integer_arg, queue_cnt);
1158         if (ret < 0 || *queue_cnt <= 0) {
1159                 ret = -EINVAL;
1160                 goto free_kvlist;
1161         }
1162
1163 free_kvlist:
1164         rte_kvargs_free(kvlist);
1165         return ret;
1166 }
1167
1168 static int
1169 get_iface_info(const char *if_name,
1170                struct rte_ether_addr *eth_addr,
1171                int *if_index)
1172 {
1173         struct ifreq ifr;
1174         int sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP);
1175
1176         if (sock < 0)
1177                 return -1;
1178
1179         strlcpy(ifr.ifr_name, if_name, IFNAMSIZ);
1180         if (ioctl(sock, SIOCGIFINDEX, &ifr))
1181                 goto error;
1182
1183         *if_index = ifr.ifr_ifindex;
1184
1185         if (ioctl(sock, SIOCGIFHWADDR, &ifr))
1186                 goto error;
1187
1188         rte_memcpy(eth_addr, ifr.ifr_hwaddr.sa_data, RTE_ETHER_ADDR_LEN);
1189
1190         close(sock);
1191         return 0;
1192
1193 error:
1194         close(sock);
1195         return -1;
1196 }
1197
1198 static struct rte_eth_dev *
1199 init_internals(struct rte_vdev_device *dev, const char *if_name,
1200                         int start_queue_idx, int queue_cnt)
1201 {
1202         const char *name = rte_vdev_device_name(dev);
1203         const unsigned int numa_node = dev->device.numa_node;
1204         struct pmd_internals *internals;
1205         struct rte_eth_dev *eth_dev;
1206         int ret;
1207         int i;
1208
1209         internals = rte_zmalloc_socket(name, sizeof(*internals), 0, numa_node);
1210         if (internals == NULL)
1211                 return NULL;
1212
1213         internals->start_queue_idx = start_queue_idx;
1214         internals->queue_cnt = queue_cnt;
1215         strlcpy(internals->if_name, if_name, IFNAMSIZ);
1216
1217         if (xdp_get_channels_info(if_name, &internals->max_queue_cnt,
1218                                   &internals->combined_queue_cnt)) {
1219                 AF_XDP_LOG(ERR, "Failed to get channel info of interface: %s\n",
1220                                 if_name);
1221                 goto err_free_internals;
1222         }
1223
1224         if (queue_cnt > internals->combined_queue_cnt) {
1225                 AF_XDP_LOG(ERR, "Specified queue count %d is larger than combined queue count %d.\n",
1226                                 queue_cnt, internals->combined_queue_cnt);
1227                 goto err_free_internals;
1228         }
1229
1230         internals->rx_queues = rte_zmalloc_socket(NULL,
1231                                         sizeof(struct pkt_rx_queue) * queue_cnt,
1232                                         0, numa_node);
1233         if (internals->rx_queues == NULL) {
1234                 AF_XDP_LOG(ERR, "Failed to allocate memory for rx queues.\n");
1235                 goto err_free_internals;
1236         }
1237
1238         internals->tx_queues = rte_zmalloc_socket(NULL,
1239                                         sizeof(struct pkt_tx_queue) * queue_cnt,
1240                                         0, numa_node);
1241         if (internals->tx_queues == NULL) {
1242                 AF_XDP_LOG(ERR, "Failed to allocate memory for tx queues.\n");
1243                 goto err_free_rx;
1244         }
1245         for (i = 0; i < queue_cnt; i++) {
1246                 internals->tx_queues[i].pair = &internals->rx_queues[i];
1247                 internals->rx_queues[i].pair = &internals->tx_queues[i];
1248                 internals->rx_queues[i].xsk_queue_idx = start_queue_idx + i;
1249                 internals->tx_queues[i].xsk_queue_idx = start_queue_idx + i;
1250         }
1251
1252         ret = get_iface_info(if_name, &internals->eth_addr,
1253                              &internals->if_index);
1254         if (ret)
1255                 goto err_free_tx;
1256
1257         eth_dev = rte_eth_vdev_allocate(dev, 0);
1258         if (eth_dev == NULL)
1259                 goto err_free_tx;
1260
1261         eth_dev->data->dev_private = internals;
1262         eth_dev->data->dev_link = pmd_link;
1263         eth_dev->data->mac_addrs = &internals->eth_addr;
1264         eth_dev->dev_ops = &ops;
1265         eth_dev->rx_pkt_burst = eth_af_xdp_rx;
1266         eth_dev->tx_pkt_burst = eth_af_xdp_tx;
1267         /* Let rte_eth_dev_close() release the port resources. */
1268         eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
1269
1270 #if defined(XDP_UMEM_UNALIGNED_CHUNK_FLAG)
1271         AF_XDP_LOG(INFO, "Zero copy between umem and mbuf enabled.\n");
1272 #endif
1273
1274         return eth_dev;
1275
1276 err_free_tx:
1277         rte_free(internals->tx_queues);
1278 err_free_rx:
1279         rte_free(internals->rx_queues);
1280 err_free_internals:
1281         rte_free(internals);
1282         return NULL;
1283 }
1284
1285 static int
1286 rte_pmd_af_xdp_probe(struct rte_vdev_device *dev)
1287 {
1288         struct rte_kvargs *kvlist;
1289         char if_name[IFNAMSIZ] = {'\0'};
1290         int xsk_start_queue_idx = ETH_AF_XDP_DFLT_START_QUEUE_IDX;
1291         int xsk_queue_cnt = ETH_AF_XDP_DFLT_QUEUE_COUNT;
1292         struct rte_eth_dev *eth_dev = NULL;
1293         const char *name;
1294
1295         AF_XDP_LOG(INFO, "Initializing pmd_af_xdp for %s\n",
1296                 rte_vdev_device_name(dev));
1297
1298         name = rte_vdev_device_name(dev);
1299         if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
1300                 strlen(rte_vdev_device_args(dev)) == 0) {
1301                 eth_dev = rte_eth_dev_attach_secondary(name);
1302                 if (eth_dev == NULL) {
1303                         AF_XDP_LOG(ERR, "Failed to probe %s\n", name);
1304                         return -EINVAL;
1305                 }
1306                 eth_dev->dev_ops = &ops;
1307                 rte_eth_dev_probing_finish(eth_dev);
1308                 return 0;
1309         }
1310
1311         kvlist = rte_kvargs_parse(rte_vdev_device_args(dev), valid_arguments);
1312         if (kvlist == NULL) {
1313                 AF_XDP_LOG(ERR, "Invalid kvargs key\n");
1314                 return -EINVAL;
1315         }
1316
1317         if (dev->device.numa_node == SOCKET_ID_ANY)
1318                 dev->device.numa_node = rte_socket_id();
1319
1320         if (parse_parameters(kvlist, if_name, &xsk_start_queue_idx,
1321                              &xsk_queue_cnt) < 0) {
1322                 AF_XDP_LOG(ERR, "Invalid kvargs value\n");
1323                 return -EINVAL;
1324         }
1325
1326         if (strlen(if_name) == 0) {
1327                 AF_XDP_LOG(ERR, "Network interface must be specified\n");
1328                 return -EINVAL;
1329         }
1330
1331         eth_dev = init_internals(dev, if_name, xsk_start_queue_idx,
1332                                         xsk_queue_cnt);
1333         if (eth_dev == NULL) {
1334                 AF_XDP_LOG(ERR, "Failed to init internals\n");
1335                 return -1;
1336         }
1337
1338         rte_eth_dev_probing_finish(eth_dev);
1339
1340         return 0;
1341 }
1342
1343 static int
1344 rte_pmd_af_xdp_remove(struct rte_vdev_device *dev)
1345 {
1346         struct rte_eth_dev *eth_dev = NULL;
1347
1348         AF_XDP_LOG(INFO, "Removing AF_XDP ethdev on numa socket %u\n",
1349                 rte_socket_id());
1350
1351         if (dev == NULL)
1352                 return -1;
1353
1354         /* find the ethdev entry */
1355         eth_dev = rte_eth_dev_allocated(rte_vdev_device_name(dev));
1356         if (eth_dev == NULL)
1357                 return 0;
1358
1359         eth_dev_close(eth_dev);
1360         rte_eth_dev_release_port(eth_dev);
1361
1362
1363         return 0;
1364 }
1365
1366 static struct rte_vdev_driver pmd_af_xdp_drv = {
1367         .probe = rte_pmd_af_xdp_probe,
1368         .remove = rte_pmd_af_xdp_remove,
1369 };
1370
1371 RTE_PMD_REGISTER_VDEV(net_af_xdp, pmd_af_xdp_drv);
1372 RTE_PMD_REGISTER_PARAM_STRING(net_af_xdp,
1373                               "iface=<string> "
1374                               "start_queue=<int> "
1375                               "queue_count=<int> ");
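/*
 * Illustrative usage only (the interface name below is a placeholder): the
 * PMD is instantiated through the vdev bus with the parameters declared
 * above, e.g.
 *   --vdev net_af_xdp,iface=eth0,start_queue=0,queue_count=1
 */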
1376
1377 RTE_INIT(af_xdp_init_log)
1378 {
1379         af_xdp_logtype = rte_log_register("pmd.net.af_xdp");
1380         if (af_xdp_logtype >= 0)
1381                 rte_log_set_level(af_xdp_logtype, RTE_LOG_NOTICE);
1382 }