szedata2: add new poll mode driver
[dpdk.git] / drivers / net / szedata2 / rte_eth_szedata2.c
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2015 CESNET
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of CESNET nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>
#include <unistd.h>
#include <stdbool.h>
#include <err.h>

#include <libsze2.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include "rte_eth_szedata2.h"

#define RTE_ETH_SZEDATA2_DEV_PATH_ARG "dev_path"
#define RTE_ETH_SZEDATA2_RX_IFACES_ARG "rx_ifaces"
#define RTE_ETH_SZEDATA2_TX_IFACES_ARG "tx_ifaces"

#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)

/**
 * size of szedata2_packet header with alignment
 */
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8

struct szedata2_rx_queue {
        struct szedata *sze;
        uint8_t rx_channel;
        uint8_t in_port;
        struct rte_mempool *mb_pool;
        volatile uint64_t rx_pkts;
        volatile uint64_t rx_bytes;
        volatile uint64_t err_pkts;
};

struct szedata2_tx_queue {
        struct szedata *sze;
        uint8_t tx_channel;
        volatile uint64_t tx_pkts;
        volatile uint64_t err_pkts;
        volatile uint64_t tx_bytes;
};

struct rxtx_szedata2 {
        uint32_t num_of_rx;
        uint32_t num_of_tx;
        uint32_t sze_rx_mask_req;
        uint32_t sze_tx_mask_req;
        char *sze_dev;
};

struct pmd_internals {
        struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES];
        struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES];
        unsigned nb_rx_queues;
        unsigned nb_tx_queues;
        uint32_t num_of_rx;
        uint32_t num_of_tx;
        uint32_t sze_rx_req;
        uint32_t sze_tx_req;
        int if_index;
        char *sze_dev;
};
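
/*
 * Explanatory note on the fields above: sze_rx_req/sze_tx_req (copied from
 * sze_rx_mask_req/sze_tx_mask_req in struct rxtx_szedata2) are bitmasks of
 * the DMA channels requested through the rx_ifaces/tx_ifaces driver
 * arguments, while num_of_rx/num_of_tx hold the popcount of those masks,
 * i.e. how many channels the driver will try to subscribe.
 */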

static const char *valid_arguments[] = {
        RTE_ETH_SZEDATA2_DEV_PATH_ARG,
        RTE_ETH_SZEDATA2_RX_IFACES_ARG,
        RTE_ETH_SZEDATA2_TX_IFACES_ARG,
        NULL
};

static struct ether_addr eth_addr = {
        .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};
static const char *drivername = "SZEdata2 PMD";
static struct rte_eth_link pmd_link = {
                .link_speed = ETH_LINK_SPEED_10G,
                .link_duplex = ETH_LINK_FULL_DUPLEX,
                .link_status = 0
};


static uint32_t
count_ones(uint32_t num)
{
        num = num - ((num >> 1) & 0x55555555); /* reuse input as temporary */
        num = (num & 0x33333333) + ((num >> 2) & 0x33333333);        /* temp */
        return (((num + (num >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; /* count */
}
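
/*
 * count_ones() is the classic SWAR (SIMD-within-a-register) population
 * count: bits are first summed in pairs, then in nibbles, and the final
 * multiply by 0x1010101 folds the four byte sums into the top byte, which
 * the shift by 24 extracts.  For example, count_ones(0x0000000f) == 4 and
 * count_ones(0x80000001) == 2.  On GCC/Clang the same result could be
 * obtained with __builtin_popcount(); the portable bit-twiddling form is
 * used here instead.
 */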

static int
init_rx_channels(struct rte_eth_dev *dev, int v)
{
        struct pmd_internals *internals = dev->data->dev_private;
        int ret;
        uint32_t i;
        uint32_t count = internals->num_of_rx;
        uint32_t num_sub = 0;
        uint32_t x;
        uint32_t rx;
        uint32_t tx;

        rx = internals->sze_rx_req;
        tx = 0;

        for (i = 0; i < count; i++) {
                /*
                 * Open, subscribe rx,tx channels and start device
                 */
                if (v)
                        RTE_LOG(INFO, PMD, "Opening SZE device %u. time\n", i);

                internals->rx_queue[num_sub].sze =
                        szedata_open(internals->sze_dev);
                if (internals->rx_queue[num_sub].sze == NULL)
                        return -1;

                /* separate least significant non-zero bit */
                x = rx & ((~rx) + 1);
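                /*
                 * Worked example of the bit trick above: with rx = 0x6
                 * (channels 1 and 2 requested), (~rx) + 1 is the two's
                 * complement -rx, so rx & -rx isolates the lowest set bit
                 * and x becomes 0x2.  Once this channel is handled,
                 * "rx = rx & (rx - 1)" at the end of the loop clears that
                 * bit and the next iteration picks up 0x4.  Subtracting 1
                 * from the isolated bit ("x -= 1" below) turns it into a
                 * mask of all lower bits, so count_ones(x) yields the bit
                 * position, i.e. the channel number (0x2 - 1 = 0x1 ->
                 * channel 1).
                 */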

                if (v)
                        RTE_LOG(INFO, PMD, "Subscribing rx channel: 0x%x "
                                "tx channel: 0x%x\n", x, tx);

                ret = szedata_subscribe3(internals->rx_queue[num_sub].sze,
                                &x, &tx);
                if (ret) {
                        szedata_close(internals->rx_queue[num_sub].sze);
                        internals->rx_queue[num_sub].sze = NULL;
                        return -1;
                }

                if (v)
                        RTE_LOG(INFO, PMD, "Subscribed rx channel: 0x%x "
                                "tx channel: 0x%x\n", x, tx);

                if (x) {
                        if (v)
                                RTE_LOG(INFO, PMD, "Starting SZE device for "
                                        "rx queue: %u\n", num_sub);

                        ret = szedata_start(internals->rx_queue[num_sub].sze);
                        if (ret) {
                                szedata_close(internals->rx_queue[num_sub].sze);
                                internals->rx_queue[num_sub].sze = NULL;
                                return -1;
                        }

                        /*
                         * set to 1 all bits lower than bit set to 1
                         * and that bit to 0
                         */
                        x -= 1;
                        internals->rx_queue[num_sub].rx_channel =
                                count_ones(x);

                        if (v)
                                RTE_LOG(INFO, PMD, "Subscribed rx channel "
                                        "no: %u\n",
                                        internals->rx_queue[num_sub].rx_channel
                                        );

                        num_sub++;
                        internals->nb_rx_queues = num_sub;
                } else {
                        if (v)
                                RTE_LOG(INFO, PMD,
                                        "Could not subscribe any rx channel. "
                                        "Closing SZE device\n");

                        szedata_close(internals->rx_queue[num_sub].sze);
                        internals->rx_queue[num_sub].sze = NULL;
                }

                /* set least significant non-zero bit to zero */
                rx = rx & (rx - 1);
        }

        dev->data->nb_rx_queues = (uint16_t)num_sub;

        if (v)
                RTE_LOG(INFO, PMD, "Successfully opened rx channels: %u\n",
                        num_sub);

        return 0;
}

static int
init_tx_channels(struct rte_eth_dev *dev, int v)
{
        struct pmd_internals *internals = dev->data->dev_private;
        int ret;
        uint32_t i;
        uint32_t count = internals->num_of_tx;
        uint32_t num_sub = 0;
        uint32_t x;
        uint32_t rx;
        uint32_t tx;

        rx = 0;
        tx = internals->sze_tx_req;

        for (i = 0; i < count; i++) {
                /*
                 * Open, subscribe rx,tx channels and start device
                 */
                if (v)
                        RTE_LOG(INFO, PMD, "Opening SZE device %u. time\n",
                                i + internals->num_of_rx);

                internals->tx_queue[num_sub].sze =
                        szedata_open(internals->sze_dev);
                if (internals->tx_queue[num_sub].sze == NULL)
                        return -1;

                /* separate least significant non-zero bit */
                x = tx & ((~tx) + 1);

                if (v)
                        RTE_LOG(INFO, PMD, "Subscribing rx channel: 0x%x "
                                "tx channel: 0x%x\n", rx, x);

                ret = szedata_subscribe3(internals->tx_queue[num_sub].sze,
                                &rx, &x);
                if (ret) {
                        szedata_close(internals->tx_queue[num_sub].sze);
                        internals->tx_queue[num_sub].sze = NULL;
                        return -1;
                }

                if (v)
                        RTE_LOG(INFO, PMD, "Subscribed rx channel: 0x%x "
                                "tx channel: 0x%x\n", rx, x);

                if (x) {
                        if (v)
                                RTE_LOG(INFO, PMD, "Starting SZE device for "
                                        "tx queue: %u\n", num_sub);

                        ret = szedata_start(internals->tx_queue[num_sub].sze);
                        if (ret) {
                                szedata_close(internals->tx_queue[num_sub].sze);
                                internals->tx_queue[num_sub].sze = NULL;
                                return -1;
                        }

                        /*
                         * set to 1 all bits lower than bit set to 1
                         * and that bit to 0
                         */
                        x -= 1;
                        internals->tx_queue[num_sub].tx_channel =
                                count_ones(x);

                        if (v)
                                RTE_LOG(INFO, PMD, "Subscribed tx channel "
                                        "no: %u\n",
                                        internals->tx_queue[num_sub].tx_channel
                                        );

                        num_sub++;
                        internals->nb_tx_queues = num_sub;
                } else {
                        if (v)
                                RTE_LOG(INFO, PMD,
                                        "Could not subscribe any tx channel. "
                                        "Closing SZE device\n");

                        szedata_close(internals->tx_queue[num_sub].sze);
                        internals->tx_queue[num_sub].sze = NULL;
                }

                /* set least significant non-zero bit to zero */
                tx = tx & (tx - 1);
        }

        dev->data->nb_tx_queues = (uint16_t)num_sub;

        if (v)
                RTE_LOG(INFO, PMD, "Successfully opened tx channels: %u\n",
                        num_sub);

        return 0;
}

static void
close_rx_channels(struct rte_eth_dev *dev)
{
        struct pmd_internals *internals = dev->data->dev_private;
        uint32_t i;
        uint32_t num_sub = internals->nb_rx_queues;

        for (i = 0; i < num_sub; i++) {
                if (internals->rx_queue[i].sze != NULL) {
                        szedata_close(internals->rx_queue[i].sze);
                        internals->rx_queue[i].sze = NULL;
                }
        }
        /* set number of rx queues to zero */
        internals->nb_rx_queues = 0;
        dev->data->nb_rx_queues = (uint16_t)0;
}

static void
close_tx_channels(struct rte_eth_dev *dev)
{
        struct pmd_internals *internals = dev->data->dev_private;
        uint32_t i;
        uint32_t num_sub = internals->nb_tx_queues;

        for (i = 0; i < num_sub; i++) {
                if (internals->tx_queue[i].sze != NULL) {
                        szedata_close(internals->tx_queue[i].sze);
                        internals->tx_queue[i].sze = NULL;
                }
        }
        /* set number of tx queues to zero */
        internals->nb_tx_queues = 0;
        dev->data->nb_tx_queues = (uint16_t)0;
}

static int
eth_dev_start(struct rte_eth_dev *dev)
{
        struct pmd_internals *internals = dev->data->dev_private;
        int ret;

        if (internals->nb_rx_queues == 0) {
                ret = init_rx_channels(dev, 0);
                if (ret != 0) {
                        close_rx_channels(dev);
                        return -1;
                }
        }

        if (internals->nb_tx_queues == 0) {
                ret = init_tx_channels(dev, 0);
                if (ret != 0) {
                        close_tx_channels(dev);
                        close_rx_channels(dev);
                        return -1;
                }
        }

        dev->data->dev_link.link_status = 1;
        return 0;
}

static void
eth_dev_stop(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internals = dev->data->dev_private;

        for (i = 0; i < internals->nb_rx_queues; i++) {
                if (internals->rx_queue[i].sze != NULL) {
                        szedata_close(internals->rx_queue[i].sze);
                        internals->rx_queue[i].sze = NULL;
                }
        }

        for (i = 0; i < internals->nb_tx_queues; i++) {
                if (internals->tx_queue[i].sze != NULL) {
                        szedata_close(internals->tx_queue[i].sze);
                        internals->tx_queue[i].sze = NULL;
                }
        }

        internals->nb_rx_queues = 0;
        internals->nb_tx_queues = 0;

        dev->data->nb_rx_queues = (uint16_t)0;
        dev->data->nb_tx_queues = (uint16_t)0;

        dev->data->dev_link.link_status = 0;
}

static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
        return 0;
}

static void
eth_dev_info(struct rte_eth_dev *dev,
                struct rte_eth_dev_info *dev_info)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev_info->driver_name = drivername;
        dev_info->if_index = internals->if_index;
        dev_info->max_mac_addrs = 1;
        dev_info->max_rx_pktlen = (uint32_t)-1;
        dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
        dev_info->min_rx_bufsize = 0;
        dev_info->pci_dev = NULL;
}

static void
eth_stats_get(struct rte_eth_dev *dev,
                struct rte_eth_stats *stats)
{
        unsigned i;
        uint64_t rx_total = 0;
        uint64_t tx_total = 0;
        uint64_t tx_err_total = 0;
        uint64_t rx_total_bytes = 0;
        uint64_t tx_total_bytes = 0;
        const struct pmd_internals *internal = dev->data->dev_private;

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_rx_queues; i++) {
                stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
                stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
                rx_total += stats->q_ipackets[i];
                rx_total_bytes += stats->q_ibytes[i];
        }

        for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
                        i < internal->nb_tx_queues; i++) {
                stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
                stats->q_errors[i] = internal->tx_queue[i].err_pkts;
                stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
                tx_total += stats->q_opackets[i];
                tx_err_total += stats->q_errors[i];
                tx_total_bytes += stats->q_obytes[i];
        }

        stats->ipackets = rx_total;
        stats->opackets = tx_total;
        stats->ibytes = rx_total_bytes;
        stats->obytes = tx_total_bytes;
        stats->oerrors = tx_err_total;
}

static void
eth_stats_reset(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internal = dev->data->dev_private;
        for (i = 0; i < internal->nb_rx_queues; i++) {
                internal->rx_queue[i].rx_pkts = 0;
                internal->rx_queue[i].rx_bytes = 0;
        }
        for (i = 0; i < internal->nb_tx_queues; i++) {
                internal->tx_queue[i].tx_pkts = 0;
                internal->tx_queue[i].err_pkts = 0;
                internal->tx_queue[i].tx_bytes = 0;
        }
}

static void
eth_dev_close(struct rte_eth_dev *dev)
{
        unsigned i;
        struct pmd_internals *internals = dev->data->dev_private;

        for (i = 0; i < internals->nb_rx_queues; i++) {
                if (internals->rx_queue[i].sze != NULL) {
                        szedata_close(internals->rx_queue[i].sze);
                        internals->rx_queue[i].sze = NULL;
                }
        }

        for (i = 0; i < internals->nb_tx_queues; i++) {
                if (internals->tx_queue[i].sze != NULL) {
                        szedata_close(internals->tx_queue[i].sze);
                        internals->tx_queue[i].sze = NULL;
                }
        }

        internals->nb_rx_queues = 0;
        internals->nb_tx_queues = 0;

        dev->data->nb_rx_queues = (uint16_t)0;
        dev->data->nb_tx_queues = (uint16_t)0;
}

static void
eth_queue_release(void *q __rte_unused)
{
}

static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
                int wait_to_complete __rte_unused)
{
        return 0;
}

static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
                uint16_t rx_queue_id,
                uint16_t nb_rx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_rxconf *rx_conf __rte_unused,
                struct rte_mempool *mb_pool)
{
        struct pmd_internals *internals = dev->data->dev_private;
        struct szedata2_rx_queue *szedata2_q =
                &internals->rx_queue[rx_queue_id];
        szedata2_q->mb_pool = mb_pool;
        dev->data->rx_queues[rx_queue_id] = szedata2_q;
        szedata2_q->in_port = dev->data->port_id;
        return 0;
}
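
/*
 * Note: eth_rx_queue_setup() only records the mempool and the destination
 * port for the queue; the underlying szedata handle is opened and the DMA
 * channel subscribed later, from eth_dev_start() via init_rx_channels().
 */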

static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
                uint16_t tx_queue_id,
                uint16_t nb_tx_desc __rte_unused,
                unsigned int socket_id __rte_unused,
                const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct pmd_internals *internals = dev->data->dev_private;
        dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
        return 0;
}

static void
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
                struct ether_addr *mac_addr __rte_unused)
{
}

static struct eth_dev_ops ops = {
                .dev_start          = eth_dev_start,
                .dev_stop           = eth_dev_stop,
                .dev_close          = eth_dev_close,
                .dev_configure      = eth_dev_configure,
                .dev_infos_get      = eth_dev_info,
                .rx_queue_setup     = eth_rx_queue_setup,
                .tx_queue_setup     = eth_tx_queue_setup,
                .rx_queue_release   = eth_queue_release,
                .tx_queue_release   = eth_queue_release,
                .link_update        = eth_link_update,
                .stats_get          = eth_stats_get,
                .stats_reset        = eth_stats_reset,
                .mac_addr_set       = eth_mac_addr_set,
};
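
/*
 * The callbacks above are invoked indirectly through the generic ethdev API:
 * rte_eth_dev_start() ends up calling .dev_start, rte_eth_stats_get() calls
 * .stats_get, and so on.  A minimal sketch of the calls an application would
 * make against this PMD (port id assumed to be 0; port_conf and mbuf_pool
 * prepared elsewhere):
 *
 *      struct rte_eth_stats stats;
 *
 *      rte_eth_dev_configure(0, 1, 1, &port_conf);
 *      rte_eth_rx_queue_setup(0, 0, 0, rte_socket_id(), NULL, mbuf_pool);
 *      rte_eth_tx_queue_setup(0, 0, 0, rte_socket_id(), NULL);
 *      rte_eth_dev_start(0);
 *      rte_eth_stats_get(0, &stats);
 */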

static int
parse_mask(const char *mask_str, uint32_t *mask_num)
{
        char *endptr;
        long int value;

        value = strtol(mask_str, &endptr, 0);
        if (*endptr != '\0' || value > UINT32_MAX || value < 0)
                return -1;

        *mask_num = (uint32_t)value;
        return 0;
}
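
/*
 * Because strtol() is called with base 0, the channel masks accept the usual
 * C integer notations: "rx_ifaces=12" (decimal), "rx_ifaces=0xc" (hex) and
 * "rx_ifaces=014" (octal) all parse to the same mask 0xc.
 */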

static int
add_rx_mask(const char *key __rte_unused, const char *value, void *extra_args)
{
        struct rxtx_szedata2 *szedata2 = extra_args;
        uint32_t mask;

        if (parse_mask(value, &mask) != 0)
                return -1;

        szedata2->sze_rx_mask_req |= mask;
        return 0;
}

static int
add_tx_mask(const char *key __rte_unused, const char *value, void *extra_args)
{
        struct rxtx_szedata2 *szedata2 = extra_args;
        uint32_t mask;

        if (parse_mask(value, &mask) != 0)
                return -1;

        szedata2->sze_tx_mask_req |= mask;
        return 0;
}
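
/*
 * add_rx_mask()/add_tx_mask() are rte_kvargs_process() callbacks, so they
 * run once for every occurrence of the key in the --vdev argument string and
 * OR the parsed values together.  For instance, passing
 * "rx_ifaces=0x1,rx_ifaces=0x2" leaves sze_rx_mask_req == 0x3, the same as a
 * single "rx_ifaces=0x3".
 */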

static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
                const unsigned nb_tx_queues,
                const unsigned numa_node,
                struct pmd_internals **internals,
                struct rte_eth_dev **eth_dev)
{
        struct rte_eth_dev_data *data = NULL;

        RTE_LOG(INFO, PMD,
                        "Creating szedata2-backed ethdev on numa socket %u\n",
                        numa_node);

        /*
         * now do all data allocation - for eth_dev structure
         * and internal (private) data
         */
        data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
        if (data == NULL)
                goto error;

        *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
                        numa_node);
        if (*internals == NULL)
                goto error;

        /* reserve an ethdev entry */
        *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
        if (*eth_dev == NULL)
                goto error;

        /*
         * now put it all together
         * - store queue data in internals,
         * - store numa_node info in eth_dev data
         * - point eth_dev_data to internals
         * - and point eth_dev structure to new eth_dev_data structure
         *
         * NOTE: we'll replace the data element of the originally allocated
         * eth_dev so the rings are local per-process
         */

        (*internals)->nb_rx_queues = nb_rx_queues;
        (*internals)->nb_tx_queues = nb_tx_queues;

        (*internals)->if_index = 0;

        data->dev_private = *internals;
        data->port_id = (*eth_dev)->data->port_id;
        snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
        data->nb_rx_queues = (uint16_t)nb_rx_queues;
        data->nb_tx_queues = (uint16_t)nb_tx_queues;
        data->dev_link = pmd_link;
        data->mac_addrs = &eth_addr;

        (*eth_dev)->data = data;
        (*eth_dev)->dev_ops = &ops;
        (*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
        (*eth_dev)->driver = NULL;
        (*eth_dev)->data->kdrv = RTE_KDRV_NONE;
        (*eth_dev)->data->drv_name = drivername;
        (*eth_dev)->data->numa_node = numa_node;

        return 0;

error:
        rte_free(data);
        rte_free(*internals);
        return -1;
}
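
/*
 * The shared "error" label above frees whatever was allocated so far;
 * rte_free() is a no-op on NULL pointers, so falling through from any of the
 * failure points is safe.
 */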

static int
rte_eth_from_szedata2(const char *name,
                struct rxtx_szedata2 *szedata2,
                const unsigned numa_node)
{
        struct pmd_internals *internals = NULL;
        struct rte_eth_dev *eth_dev = NULL;
        int ret;

        if (rte_pmd_init_internals(name, 0, 0, numa_node,
                        &internals, &eth_dev) < 0)
                return -1;

        internals->sze_dev = szedata2->sze_dev;
        internals->sze_rx_req = szedata2->sze_rx_mask_req;
        internals->sze_tx_req = szedata2->sze_tx_mask_req;
        internals->num_of_rx = szedata2->num_of_rx;
        internals->num_of_tx = szedata2->num_of_tx;

        RTE_LOG(INFO, PMD, "Number of rx channels to open: %u mask: 0x%x\n",
                        internals->num_of_rx, internals->sze_rx_req);
        RTE_LOG(INFO, PMD, "Number of tx channels to open: %u mask: 0x%x\n",
                        internals->num_of_tx, internals->sze_tx_req);

        ret = init_rx_channels(eth_dev, 1);
        if (ret != 0) {
                close_rx_channels(eth_dev);
                return -1;
        }

        ret = init_tx_channels(eth_dev, 1);
        if (ret != 0) {
                close_tx_channels(eth_dev);
                close_rx_channels(eth_dev);
                return -1;
        }

        eth_dev->rx_pkt_burst = NULL;
        eth_dev->tx_pkt_burst = NULL;

        return 0;
}
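
/*
 * rx_pkt_burst/tx_pkt_burst are left NULL at this point; the actual burst
 * routines that move packets between mbufs and the szedata2 DMA channels are
 * presumably supplied elsewhere.  For reference, DPDK burst handlers follow
 * the eth_rx_burst_t/eth_tx_burst_t prototypes, e.g. (illustrative names
 * only):
 *
 *      static uint16_t
 *      eth_szedata2_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
 *
 *      static uint16_t
 *      eth_szedata2_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
 */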


static int
rte_pmd_szedata2_devinit(const char *name, const char *params)
{
        unsigned numa_node;
        int ret;
        struct rte_kvargs *kvlist;
        unsigned k_idx;
        struct rte_kvargs_pair *pair = NULL;
        struct rxtx_szedata2 szedata2 = { 0, 0, 0, 0, NULL };
        bool dev_path_missing = true;

        RTE_LOG(INFO, PMD, "Initializing pmd_szedata2 for %s\n", name);

        numa_node = rte_socket_id();

        kvlist = rte_kvargs_parse(params, valid_arguments);
        if (kvlist == NULL)
                return -1;

        /*
         * Get szedata2 device path and rx,tx channels from passed arguments.
         */

        if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_DEV_PATH_ARG) != 1)
                goto err;

        if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_RX_IFACES_ARG) < 1)
                goto err;

        if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_TX_IFACES_ARG) < 1)
                goto err;

        for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
                pair = &kvlist->pairs[k_idx];
                if (strstr(pair->key, RTE_ETH_SZEDATA2_DEV_PATH_ARG) != NULL) {
                        szedata2.sze_dev = pair->value;
                        dev_path_missing = false;
                        break;
                }
        }

        if (dev_path_missing)
                goto err;

        ret = rte_kvargs_process(kvlist, RTE_ETH_SZEDATA2_RX_IFACES_ARG,
                        &add_rx_mask, &szedata2);
        if (ret < 0)
                goto err;

        ret = rte_kvargs_process(kvlist, RTE_ETH_SZEDATA2_TX_IFACES_ARG,
                        &add_tx_mask, &szedata2);
        if (ret < 0)
                goto err;

        szedata2.num_of_rx = count_ones(szedata2.sze_rx_mask_req);
        szedata2.num_of_tx = count_ones(szedata2.sze_tx_mask_req);

        RTE_LOG(INFO, PMD, "SZE device found at path %s\n", szedata2.sze_dev);

        return rte_eth_from_szedata2(name, &szedata2, numa_node);
err:
        rte_kvargs_free(kvlist);
        return -1;
}

static int
rte_pmd_szedata2_devuninit(const char *name)
{
        struct rte_eth_dev *dev = NULL;

        if (name == NULL)
                return -1;

        RTE_LOG(INFO, PMD, "Uninitializing pmd_szedata2 for %s "
                        "on numa socket %u\n", name, rte_socket_id());

        dev = rte_eth_dev_allocated(name);
        if (dev == NULL)
                return -1;

        rte_free(dev->data->dev_private);
        rte_free(dev->data);
        rte_eth_dev_release_port(dev);
        return 0;
}

static struct rte_driver pmd_szedata2_drv = {
        .name = "eth_szedata2",
        .type = PMD_VDEV,
        .init = rte_pmd_szedata2_devinit,
        .uninit = rte_pmd_szedata2_devuninit,
};

PMD_REGISTER_DRIVER(pmd_szedata2_drv);
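
/*
 * Illustrative invocation (assumed device path, channel masks and EAL
 * options; the real values depend on the installed COMBO card and the
 * szedata2 kernel driver):
 *
 *   testpmd -c 0xf -n 4 \
 *       --vdev 'eth_szedata2,dev_path=/dev/szedataII0,rx_ifaces=0x3,tx_ifaces=0x3' \
 *       -- -i
 *
 * The vdev name is matched against the driver name "eth_szedata2" registered
 * above, and dev_path/rx_ifaces/tx_ifaces map to the kvargs handled in
 * rte_pmd_szedata2_devinit().
 */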