1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015 - 2016 CESNET
3  */
4
5 #include <stdint.h>
6 #include <unistd.h>
7 #include <stdbool.h>
8 #include <err.h>
9 #include <sys/types.h>
10 #include <dirent.h>
11 #include <sys/stat.h>
12 #include <fcntl.h>
13 #include <sys/mman.h>
14
15 #include <libsze2.h>
16
17 #include <rte_mbuf.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_malloc.h>
21 #include <rte_memcpy.h>
22 #include <rte_kvargs.h>
23 #include <rte_dev.h>
24
25 #include "rte_eth_szedata2.h"
26 #include "szedata2_logs.h"
27
28 #define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
29 #define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
30 #define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)
31
32 /**
33  * Size of the szedata2 packet header, including alignment.
34  */
35 #define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
36
37 #define RTE_SZEDATA2_DRIVER_NAME net_szedata2
38
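/*
 * Format of the szedata2 character device path; %u is the device index
 * (e.g. "/dev/szedataII0" for index 0).
 */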
39 #define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
40
41 struct pmd_internals {
42         struct rte_eth_dev *dev;
43         uint16_t max_rx_queues;
44         uint16_t max_tx_queues;
45         char sze_dev[PATH_MAX];
46         struct rte_mem_resource *pci_rsc;
47 };
48
49 struct szedata2_rx_queue {
50         struct pmd_internals *priv;
51         struct szedata *sze;
52         uint8_t rx_channel;
53         uint16_t in_port;
54         struct rte_mempool *mb_pool;
55         volatile uint64_t rx_pkts;
56         volatile uint64_t rx_bytes;
57         volatile uint64_t err_pkts;
58 };
59
60 struct szedata2_tx_queue {
61         struct pmd_internals *priv;
62         struct szedata *sze;
63         uint8_t tx_channel;
64         volatile uint64_t tx_pkts;
65         volatile uint64_t tx_bytes;
66         volatile uint64_t err_pkts;
67 };
68
69 int szedata2_logtype_init;
70 int szedata2_logtype_driver;
71
72 static struct ether_addr eth_addr = {
73         .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
74 };
75
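/*
 * Non-scattered RX burst handler.
 *
 * Locks data in the szedata2 RX ring, parses each segment header and copies
 * the packet payload into a single mbuf per packet. Packets larger than the
 * mbuf data room are dropped; large packets are handled by
 * eth_szedata2_rx_scattered() below.
 */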
76 static uint16_t
77 eth_szedata2_rx(void *queue,
78                 struct rte_mbuf **bufs,
79                 uint16_t nb_pkts)
80 {
81         unsigned int i;
82         struct rte_mbuf *mbuf;
83         struct szedata2_rx_queue *sze_q = queue;
84         struct rte_pktmbuf_pool_private *mbp_priv;
85         uint16_t num_rx = 0;
86         uint16_t buf_size;
87         uint16_t sg_size;
88         uint16_t hw_size;
89         uint16_t packet_size;
90         uint64_t num_bytes = 0;
91         struct szedata *sze = sze_q->sze;
92         uint8_t *header_ptr = NULL; /* header of packet */
93         uint8_t *packet_ptr1 = NULL;
94         uint8_t *packet_ptr2 = NULL;
95         uint16_t packet_len1 = 0;
96         uint16_t packet_len2 = 0;
97         uint16_t hw_data_align;
98
99         if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
100                 return 0;
101
102         /*
103          * Read up to the given number of packets from the szedata2 channel
104          * associated with this queue and copy each packet's data into a
105          * newly allocated mbuf that is returned to the caller.
106          */
107         for (i = 0; i < nb_pkts; i++) {
108                 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
109
110                 if (unlikely(mbuf == NULL)) {
111                         sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
112                         break;
113                 }
114
115                 /* get the next sze packet */
116                 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
117                                 sze->ct_rx_lck->next == NULL) {
118                         /* unlock old data */
119                         szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
120                         sze->ct_rx_lck_orig = NULL;
121                         sze->ct_rx_lck = NULL;
122                 }
123
124                 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
125                         /* nothing to read, lock new data */
126                         sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
127                         sze->ct_rx_lck_orig = sze->ct_rx_lck;
128
129                         if (sze->ct_rx_lck == NULL) {
130                                 /* nothing to lock */
131                                 rte_pktmbuf_free(mbuf);
132                                 break;
133                         }
134
135                         sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
136                         sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
137
138                         if (!sze->ct_rx_rem_bytes) {
139                                 rte_pktmbuf_free(mbuf);
140                                 break;
141                         }
142                 }
143
144                 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
145                         /*
146                          * cut in header
147                          * copy parts of header to merge buffer
148                          */
149                         if (sze->ct_rx_lck->next == NULL) {
150                                 rte_pktmbuf_free(mbuf);
151                                 break;
152                         }
153
154                         /* copy first part of header */
155                         rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
156                                         sze->ct_rx_rem_bytes);
157
158                         /* copy second part of header */
159                         sze->ct_rx_lck = sze->ct_rx_lck->next;
160                         sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
161                         rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
162                                 sze->ct_rx_cur_ptr,
163                                 RTE_SZE2_PACKET_HEADER_SIZE -
164                                 sze->ct_rx_rem_bytes);
165
166                         sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
167                                 sze->ct_rx_rem_bytes;
168                         sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
169                                 RTE_SZE2_PACKET_HEADER_SIZE +
170                                 sze->ct_rx_rem_bytes;
171
172                         header_ptr = (uint8_t *)sze->ct_rx_buffer;
173                 } else {
174                         /* not cut */
175                         header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
176                         sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
177                         sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
178                 }
179
180                 sg_size = le16toh(*((uint16_t *)header_ptr));
181                 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
182                 packet_size = sg_size -
183                         RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
184
185
186                 /* check that the packet is valid */
187                 if (!sg_size)
188                         errx(5, "Zero segsize");
189
190                 /* check sg_size and hwsize */
191                 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
192                         errx(10, "Hwsize bigger than expected. Segsize: %d, "
193                                 "hwsize: %d", sg_size, hw_size);
194                 }
195
196                 hw_data_align =
197                         RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
198                         RTE_SZE2_PACKET_HEADER_SIZE;
199
200                 if (sze->ct_rx_rem_bytes >=
201                                 (uint16_t)(sg_size -
202                                 RTE_SZE2_PACKET_HEADER_SIZE)) {
203                         /* no cut */
204                         /* one packet ready - go to another */
205                         packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
206                         packet_len1 = packet_size;
207                         packet_ptr2 = NULL;
208                         packet_len2 = 0;
209
210                         sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
211                                 RTE_SZE2_PACKET_HEADER_SIZE;
212                         sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
213                                 RTE_SZE2_PACKET_HEADER_SIZE;
214                 } else {
215                         /* cut in data */
216                         if (sze->ct_rx_lck->next == NULL) {
217                                 errx(6, "Need \"next\" lock, "
218                                         "but it is missing: %u",
219                                         sze->ct_rx_rem_bytes);
220                         }
221
222                         /* skip hw data */
223                         if (sze->ct_rx_rem_bytes <= hw_data_align) {
224                                 uint16_t rem_size = hw_data_align -
225                                         sze->ct_rx_rem_bytes;
226
227                                 /* MOVE to next lock */
228                                 sze->ct_rx_lck = sze->ct_rx_lck->next;
229                                 sze->ct_rx_cur_ptr =
230                                         (void *)(((uint8_t *)
231                                         (sze->ct_rx_lck->start)) + rem_size);
232
233                                 packet_ptr1 = sze->ct_rx_cur_ptr;
234                                 packet_len1 = packet_size;
235                                 packet_ptr2 = NULL;
236                                 packet_len2 = 0;
237
238                                 sze->ct_rx_cur_ptr +=
239                                         RTE_SZE2_ALIGN8(packet_size);
240                                 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
241                                         rem_size - RTE_SZE2_ALIGN8(packet_size);
242                         } else {
243                                 /* get pointer and length from first part */
244                                 packet_ptr1 = sze->ct_rx_cur_ptr +
245                                         hw_data_align;
246                                 packet_len1 = sze->ct_rx_rem_bytes -
247                                         hw_data_align;
248
249                                 /* MOVE to next lock */
250                                 sze->ct_rx_lck = sze->ct_rx_lck->next;
251                                 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
252
253                                 /* get pointer and length from second part */
254                                 packet_ptr2 = sze->ct_rx_cur_ptr;
255                                 packet_len2 = packet_size - packet_len1;
256
257                                 sze->ct_rx_cur_ptr +=
258                                         RTE_SZE2_ALIGN8(packet_size) -
259                                         packet_len1;
260                                 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
261                                         (RTE_SZE2_ALIGN8(packet_size) -
262                                          packet_len1);
263                         }
264                 }
265
266                 if (unlikely(packet_ptr1 == NULL)) {
267                         rte_pktmbuf_free(mbuf);
268                         break;
269                 }
270
271                 /* get the space available for data in the mbuf */
272                 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
273                 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
274                                 RTE_PKTMBUF_HEADROOM);
275
276                 if (packet_size <= buf_size) {
277                         /* sze packet will fit in one mbuf, go ahead and copy */
278                         rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
279                                         packet_ptr1, packet_len1);
280                         if (packet_ptr2 != NULL) {
281                                 rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
282                                         uint8_t *) + packet_len1),
283                                         packet_ptr2, packet_len2);
284                         }
285                         mbuf->data_len = (uint16_t)packet_size;
286
287                         mbuf->pkt_len = packet_size;
288                         mbuf->port = sze_q->in_port;
289                         bufs[num_rx] = mbuf;
290                         num_rx++;
291                         num_bytes += packet_size;
292                 } else {
293                         /*
294                          * sze packet will not fit in one mbuf,
295                          * scattered mode is not enabled, drop packet
296                          */
297                         PMD_DRV_LOG(ERR,
298                                 "SZE segment %d bytes will not fit in one mbuf "
299                                 "(%d bytes), scattered mode is not enabled, "
300                                 "drop packet!!",
301                                 packet_size, buf_size);
302                         rte_pktmbuf_free(mbuf);
303                 }
304         }
305
306         sze_q->rx_pkts += num_rx;
307         sze_q->rx_bytes += num_bytes;
308         return num_rx;
309 }
310
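/*
 * Scattered RX burst handler.
 *
 * Same parsing as eth_szedata2_rx(), but a packet larger than one mbuf is
 * chained across several mbufs. The sze ring state is backed up before each
 * packet so it can be restored if an mbuf allocation fails mid-packet.
 */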
311 static uint16_t
312 eth_szedata2_rx_scattered(void *queue,
313                 struct rte_mbuf **bufs,
314                 uint16_t nb_pkts)
315 {
316         unsigned int i;
317         struct rte_mbuf *mbuf;
318         struct szedata2_rx_queue *sze_q = queue;
319         struct rte_pktmbuf_pool_private *mbp_priv;
320         uint16_t num_rx = 0;
321         uint16_t buf_size;
322         uint16_t sg_size;
323         uint16_t hw_size;
324         uint16_t packet_size;
325         uint64_t num_bytes = 0;
326         struct szedata *sze = sze_q->sze;
327         uint8_t *header_ptr = NULL; /* header of packet */
328         uint8_t *packet_ptr1 = NULL;
329         uint8_t *packet_ptr2 = NULL;
330         uint16_t packet_len1 = 0;
331         uint16_t packet_len2 = 0;
332         uint16_t hw_data_align;
333         uint64_t *mbuf_failed_ptr =
334                 &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
335
336         if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
337                 return 0;
338
339         /*
340          * Read up to the given number of packets from the szedata2 channel
341          * associated with this queue and copy each packet's data into a
342          * newly allocated mbuf that is returned to the caller.
343          */
344         for (i = 0; i < nb_pkts; i++) {
345                 const struct szedata_lock *ct_rx_lck_backup;
346                 unsigned int ct_rx_rem_bytes_backup;
347                 unsigned char *ct_rx_cur_ptr_backup;
348
349                 /* get the next sze packet */
350                 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
351                                 sze->ct_rx_lck->next == NULL) {
352                         /* unlock old data */
353                         szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
354                         sze->ct_rx_lck_orig = NULL;
355                         sze->ct_rx_lck = NULL;
356                 }
357
358                 /*
359                  * Store the items from the sze structure that may change
360                  * before the mbuf is allocated. They are restored if the
361                  * mbuf allocation fails.
362                  */
363                 ct_rx_lck_backup = sze->ct_rx_lck;
364                 ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
365                 ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
366
367                 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
368                         /* nothing to read, lock new data */
369                         sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
370                         sze->ct_rx_lck_orig = sze->ct_rx_lck;
371
372                         /*
373                          * The backed-up sze items must be refreshed after
374                          * locking so that they point to the new locks.
375                          */
376                         ct_rx_lck_backup = sze->ct_rx_lck;
377                         ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
378                         ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
379
380                         if (sze->ct_rx_lck == NULL)
381                                 /* nothing to lock */
382                                 break;
383
384                         sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
385                         sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
386
387                         if (!sze->ct_rx_rem_bytes)
388                                 break;
389                 }
390
391                 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
392                         /*
393                          * cut in header - copy parts of header to merge buffer
394                          */
395                         if (sze->ct_rx_lck->next == NULL)
396                                 break;
397
398                         /* copy first part of header */
399                         rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
400                                         sze->ct_rx_rem_bytes);
401
402                         /* copy second part of header */
403                         sze->ct_rx_lck = sze->ct_rx_lck->next;
404                         sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
405                         rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
406                                 sze->ct_rx_cur_ptr,
407                                 RTE_SZE2_PACKET_HEADER_SIZE -
408                                 sze->ct_rx_rem_bytes);
409
410                         sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
411                                 sze->ct_rx_rem_bytes;
412                         sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
413                                 RTE_SZE2_PACKET_HEADER_SIZE +
414                                 sze->ct_rx_rem_bytes;
415
416                         header_ptr = (uint8_t *)sze->ct_rx_buffer;
417                 } else {
418                         /* not cut */
419                         header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
420                         sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
421                         sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
422                 }
423
424                 sg_size = le16toh(*((uint16_t *)header_ptr));
425                 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
426                 packet_size = sg_size -
427                         RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
428
429
430                 /* check that the packet is valid */
431                 if (!sg_size)
432                         errx(5, "Zero segsize");
433
434                 /* check sg_size and hwsize */
435                 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
436                         errx(10, "Hwsize bigger than expected. Segsize: %d, "
437                                         "hwsize: %d", sg_size, hw_size);
438                 }
439
440                 hw_data_align =
441                         RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
442                         hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;
443
444                 if (sze->ct_rx_rem_bytes >=
445                                 (uint16_t)(sg_size -
446                                 RTE_SZE2_PACKET_HEADER_SIZE)) {
447                         /* no cut */
448                         /* one packet ready - go to another */
449                         packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
450                         packet_len1 = packet_size;
451                         packet_ptr2 = NULL;
452                         packet_len2 = 0;
453
454                         sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
455                                 RTE_SZE2_PACKET_HEADER_SIZE;
456                         sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
457                                 RTE_SZE2_PACKET_HEADER_SIZE;
458                 } else {
459                         /* cut in data */
460                         if (sze->ct_rx_lck->next == NULL) {
461                                 errx(6, "Need \"next\" lock, but it is "
462                                         "missing: %u", sze->ct_rx_rem_bytes);
463                         }
464
465                         /* skip hw data */
466                         if (sze->ct_rx_rem_bytes <= hw_data_align) {
467                                 uint16_t rem_size = hw_data_align -
468                                         sze->ct_rx_rem_bytes;
469
470                                 /* MOVE to next lock */
471                                 sze->ct_rx_lck = sze->ct_rx_lck->next;
472                                 sze->ct_rx_cur_ptr =
473                                         (void *)(((uint8_t *)
474                                         (sze->ct_rx_lck->start)) + rem_size);
475
476                                 packet_ptr1 = sze->ct_rx_cur_ptr;
477                                 packet_len1 = packet_size;
478                                 packet_ptr2 = NULL;
479                                 packet_len2 = 0;
480
481                                 sze->ct_rx_cur_ptr +=
482                                         RTE_SZE2_ALIGN8(packet_size);
483                                 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
484                                         rem_size - RTE_SZE2_ALIGN8(packet_size);
485                         } else {
486                                 /* get pointer and length from first part */
487                                 packet_ptr1 = sze->ct_rx_cur_ptr +
488                                         hw_data_align;
489                                 packet_len1 = sze->ct_rx_rem_bytes -
490                                         hw_data_align;
491
492                                 /* MOVE to next lock */
493                                 sze->ct_rx_lck = sze->ct_rx_lck->next;
494                                 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
495
496                                 /* get pointer and length from second part */
497                                 packet_ptr2 = sze->ct_rx_cur_ptr;
498                                 packet_len2 = packet_size - packet_len1;
499
500                                 sze->ct_rx_cur_ptr +=
501                                         RTE_SZE2_ALIGN8(packet_size) -
502                                         packet_len1;
503                                 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
504                                         (RTE_SZE2_ALIGN8(packet_size) -
505                                          packet_len1);
506                         }
507                 }
508
509                 if (unlikely(packet_ptr1 == NULL))
510                         break;
511
512                 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
513
514                 if (unlikely(mbuf == NULL)) {
515                         /*
516                          * Restore the sze items to their state after
517                          * unlocking (or after locking, if it happened).
518                          */
519                         sze->ct_rx_lck = ct_rx_lck_backup;
520                         sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
521                         sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
522                         sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
523                         break;
524                 }
525
526                 /* get the space available for data in the mbuf */
527                 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
528                 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
529                                 RTE_PKTMBUF_HEADROOM);
530
531                 if (packet_size <= buf_size) {
532                         /* sze packet will fit in one mbuf, go ahead and copy */
533                         rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
534                                         packet_ptr1, packet_len1);
535                         if (packet_ptr2 != NULL) {
536                                 rte_memcpy((void *)
537                                         (rte_pktmbuf_mtod(mbuf, uint8_t *) +
538                                         packet_len1), packet_ptr2, packet_len2);
539                         }
540                         mbuf->data_len = (uint16_t)packet_size;
541                 } else {
542                         /*
543                          * sze packet will not fit in one mbuf,
544                          * scatter the packet across multiple mbufs
545                          */
546                         struct rte_mbuf *m = mbuf;
547                         uint16_t len = rte_pktmbuf_tailroom(mbuf);
548
549                         /* copy first part of packet */
550                         /* fill first mbuf */
551                         rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
552                                 len);
553                         packet_len1 -= len;
554                         packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
555
556                         while (packet_len1 > 0) {
557                                 /* fill new mbufs */
558                                 m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
559
560                                 if (unlikely(m->next == NULL)) {
561                                         rte_pktmbuf_free(mbuf);
562                                         /*
563                                          * Restore the sze items to their
564                                          * state after unlocking (or after
565                                          * locking, if it happened).
566                                          */
567                                         sze->ct_rx_lck = ct_rx_lck_backup;
568                                         sze->ct_rx_rem_bytes =
569                                                 ct_rx_rem_bytes_backup;
570                                         sze->ct_rx_cur_ptr =
571                                                 ct_rx_cur_ptr_backup;
572                                         (*mbuf_failed_ptr)++;
573                                         goto finish;
574                                 }
575
576                                 m = m->next;
577
578                                 len = RTE_MIN(rte_pktmbuf_tailroom(m),
579                                         packet_len1);
580                                 rte_memcpy(rte_pktmbuf_append(mbuf, len),
581                                         packet_ptr1, len);
582
583                                 (mbuf->nb_segs)++;
584                                 packet_len1 -= len;
585                                 packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
586                         }
587
588                         if (packet_ptr2 != NULL) {
589                                 /* copy second part of packet, if exists */
590                                 /* fill the rest of currently last mbuf */
591                                 len = rte_pktmbuf_tailroom(m);
592                                 rte_memcpy(rte_pktmbuf_append(mbuf, len),
593                                         packet_ptr2, len);
594                                 packet_len2 -= len;
595                                 packet_ptr2 = ((uint8_t *)packet_ptr2) + len;
596
597                                 while (packet_len2 > 0) {
598                                         /* fill new mbufs */
599                                         m->next = rte_pktmbuf_alloc(
600                                                         sze_q->mb_pool);
601
602                                         if (unlikely(m->next == NULL)) {
603                                                 rte_pktmbuf_free(mbuf);
604                                                 /*
605                                                  * Restore the sze items to
606                                                  * their state after
607                                                  * unlocking (or after
608                                                  * locking, if it happened).
609                                                  */
610                                                 sze->ct_rx_lck =
611                                                         ct_rx_lck_backup;
612                                                 sze->ct_rx_rem_bytes =
613                                                         ct_rx_rem_bytes_backup;
614                                                 sze->ct_rx_cur_ptr =
615                                                         ct_rx_cur_ptr_backup;
616                                                 (*mbuf_failed_ptr)++;
617                                                 goto finish;
618                                         }
619
620                                         m = m->next;
621
622                                         len = RTE_MIN(rte_pktmbuf_tailroom(m),
623                                                 packet_len2);
624                                         rte_memcpy(
625                                                 rte_pktmbuf_append(mbuf, len),
626                                                 packet_ptr2, len);
627
628                                         (mbuf->nb_segs)++;
629                                         packet_len2 -= len;
630                                         packet_ptr2 = ((uint8_t *)packet_ptr2) +
631                                                 len;
632                                 }
633                         }
634                 }
635                 mbuf->pkt_len = packet_size;
636                 mbuf->port = sze_q->in_port;
637                 bufs[num_rx] = mbuf;
638                 num_rx++;
639                 num_bytes += packet_size;
640         }
641
642 finish:
643         sze_q->rx_pkts += num_rx;
644         sze_q->rx_bytes += num_bytes;
645         return num_rx;
646 }
647
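/*
 * TX burst handler.
 *
 * Locks up to RTE_ETH_SZEDATA2_TX_LOCK_SIZE bytes of the TX ring, writes an
 * 8-byte little-endian header carrying the segment length in front of each
 * packet, copies the mbuf (or mbuf chain) into the locked area and handles
 * packets spanning the boundary between two lock regions.
 */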
648 static uint16_t
649 eth_szedata2_tx(void *queue,
650                 struct rte_mbuf **bufs,
651                 uint16_t nb_pkts)
652 {
653         struct rte_mbuf *mbuf;
654         struct szedata2_tx_queue *sze_q = queue;
655         uint16_t num_tx = 0;
656         uint64_t num_bytes = 0;
657
658         const struct szedata_lock *lck;
659         uint32_t lock_size;
660         uint32_t lock_size2;
661         void *dst;
662         uint32_t pkt_len;
663         uint32_t hwpkt_len;
664         uint32_t unlock_size;
665         uint32_t rem_len;
666         uint16_t mbuf_segs;
667         uint16_t pkt_left = nb_pkts;
668
669         if (sze_q->sze == NULL || nb_pkts == 0)
670                 return 0;
671
672         while (pkt_left > 0) {
673                 unlock_size = 0;
674                 lck = szedata_tx_lock_data(sze_q->sze,
675                         RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
676                         sze_q->tx_channel);
677                 if (lck == NULL)
678                         continue;
679
680                 dst = lck->start;
681                 lock_size = lck->len;
682                 lock_size2 = lck->next ? lck->next->len : 0;
683
684 next_packet:
685                 mbuf = bufs[nb_pkts - pkt_left];
686
687                 pkt_len = mbuf->pkt_len;
688                 mbuf_segs = mbuf->nb_segs;
689
690                 hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
691                         RTE_SZE2_ALIGN8(pkt_len);
692
693                 if (lock_size + lock_size2 < hwpkt_len) {
694                         szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
695                         continue;
696                 }
697
698                 num_bytes += pkt_len;
699
700                 if (lock_size > hwpkt_len) {
701                         void *tmp_dst;
702
703                         rem_len = 0;
704
705                         /* write the packet length into the first 2 bytes of the 8B header */
706                         *((uint16_t *)dst) = htole16(
707                                         RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
708                                         pkt_len);
709                         *(((uint16_t *)dst) + 1) = htole16(0);
710
711                         /* copy packet from mbuf */
712                         tmp_dst = ((uint8_t *)(dst)) +
713                                 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
714                         if (mbuf_segs == 1) {
715                                 /*
716                                  * non-scattered packet,
717                                  * transmit from one mbuf
718                                  */
719                                 rte_memcpy(tmp_dst,
720                                         rte_pktmbuf_mtod(mbuf, const void *),
721                                         pkt_len);
722                         } else {
723                                 /* scattered packet, transmit from multiple mbufs */
724                                 struct rte_mbuf *m = mbuf;
725                                 while (m) {
726                                         rte_memcpy(tmp_dst,
727                                                 rte_pktmbuf_mtod(m,
728                                                 const void *),
729                                                 m->data_len);
730                                         tmp_dst = ((uint8_t *)(tmp_dst)) +
731                                                 m->data_len;
732                                         m = m->next;
733                                 }
734                         }
735
736
737                         dst = ((uint8_t *)dst) + hwpkt_len;
738                         unlock_size += hwpkt_len;
739                         lock_size -= hwpkt_len;
740
741                         rte_pktmbuf_free(mbuf);
742                         num_tx++;
743                         pkt_left--;
744                         if (pkt_left == 0) {
745                                 szedata_tx_unlock_data(sze_q->sze, lck,
746                                         unlock_size);
747                                 break;
748                         }
749                         goto next_packet;
750                 } else if (lock_size + lock_size2 >= hwpkt_len) {
751                         void *tmp_dst;
752                         uint16_t write_len;
753
754                         /* write the packet length into the first 2 bytes of the 8B header */
755                         *((uint16_t *)dst) =
756                                 htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
757                                         pkt_len);
758                         *(((uint16_t *)dst) + 1) = htole16(0);
759
760                         /*
761                          * If the raw packet (pkt_len) is smaller than lock_size,
762                          * compute the correct length for the memcpy.
763                          */
764                         write_len =
765                                 pkt_len < lock_size -
766                                 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
767                                 pkt_len :
768                                 lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
769
770                         rem_len = hwpkt_len - lock_size;
771
772                         tmp_dst = ((uint8_t *)(dst)) +
773                                 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
774                         if (mbuf_segs == 1) {
775                                 /*
776                                  * non-scattered packet,
777                                  * transmit from one mbuf
778                                  */
779                                 /* copy part of packet to first area */
780                                 rte_memcpy(tmp_dst,
781                                         rte_pktmbuf_mtod(mbuf, const void *),
782                                         write_len);
783
784                                 if (lck->next)
785                                         dst = lck->next->start;
786
787                                 /* copy part of packet to second area */
788                                 rte_memcpy(dst,
789                                         (const void *)(rte_pktmbuf_mtod(mbuf,
790                                                         const uint8_t *) +
791                                         write_len), pkt_len - write_len);
792                         } else {
793                                 /* scattered packet, transmit from multiple mbufs */
794                                 struct rte_mbuf *m = mbuf;
795                                 uint16_t written = 0;
796                                 uint16_t to_write = 0;
797                                 bool new_mbuf = true;
798                                 uint16_t write_off = 0;
799
800                                 /* copy part of packet to first area */
801                                 while (m && written < write_len) {
802                                         to_write = RTE_MIN(m->data_len,
803                                                         write_len - written);
804                                         rte_memcpy(tmp_dst,
805                                                 rte_pktmbuf_mtod(m,
806                                                         const void *),
807                                                 to_write);
808
809                                         tmp_dst = ((uint8_t *)(tmp_dst)) +
810                                                 to_write;
811                                         if (m->data_len <= write_len -
812                                                         written) {
813                                                 m = m->next;
814                                                 new_mbuf = true;
815                                         } else {
816                                                 new_mbuf = false;
817                                         }
818                                         written += to_write;
819                                 }
820
821                                 if (lck->next)
822                                         dst = lck->next->start;
823
824                                 tmp_dst = dst;
825                                 written = 0;
826                                 write_off = new_mbuf ? 0 : to_write;
827
828                                 /* copy part of packet to second area */
829                                 while (m && written < pkt_len - write_len) {
830                                         rte_memcpy(tmp_dst, (const void *)
831                                                 (rte_pktmbuf_mtod(m,
832                                                 uint8_t *) + write_off),
833                                                 m->data_len - write_off);
834
835                                         tmp_dst = ((uint8_t *)(tmp_dst)) +
836                                                 (m->data_len - write_off);
837                                         written += m->data_len - write_off;
838                                         m = m->next;
839                                         write_off = 0;
840                                 }
841                         }
842
843                         dst = ((uint8_t *)dst) + rem_len;
844                         unlock_size += hwpkt_len;
845                         lock_size = lock_size2 - rem_len;
846                         lock_size2 = 0;
847
848                         rte_pktmbuf_free(mbuf);
849                         num_tx++;
850                 }
851
852                 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
853                 pkt_left--;
854         }
855
856         sze_q->tx_pkts += num_tx;
857         sze_q->err_pkts += nb_pkts - num_tx;
858         sze_q->tx_bytes += num_bytes;
859         return num_tx;
860 }
861
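/*
 * Open the szedata2 device for this queue (if not already open), subscribe
 * the queue's RX channel and start reception on it.
 */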
862 static int
863 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
864 {
865         struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
866         int ret;
867         struct pmd_internals *internals = (struct pmd_internals *)
868                 dev->data->dev_private;
869
870         if (rxq->sze == NULL) {
871                 uint32_t rx = 1 << rxq->rx_channel;
872                 uint32_t tx = 0;
873                 rxq->sze = szedata_open(internals->sze_dev);
874                 if (rxq->sze == NULL)
875                         return -EINVAL;
876                 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
877                 if (ret != 0 || rx == 0)
878                         goto err;
879         }
880
881         ret = szedata_start(rxq->sze);
882         if (ret != 0)
883                 goto err;
884         dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
885         return 0;
886
887 err:
888         szedata_close(rxq->sze);
889         rxq->sze = NULL;
890         return -EINVAL;
891 }
892
893 static int
894 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
895 {
896         struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
897
898         if (rxq->sze != NULL) {
899                 szedata_close(rxq->sze);
900                 rxq->sze = NULL;
901         }
902
903         dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
904         return 0;
905 }
906
907 static int
908 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
909 {
910         struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
911         int ret;
912         struct pmd_internals *internals = (struct pmd_internals *)
913                 dev->data->dev_private;
914
915         if (txq->sze == NULL) {
916                 uint32_t rx = 0;
917                 uint32_t tx = 1 << txq->tx_channel;
918                 txq->sze = szedata_open(internals->sze_dev);
919                 if (txq->sze == NULL)
920                         return -EINVAL;
921                 ret = szedata_subscribe3(txq->sze, &rx, &tx);
922                 if (ret != 0 || tx == 0)
923                         goto err;
924         }
925
926         ret = szedata_start(txq->sze);
927         if (ret != 0)
928                 goto err;
929         dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
930         return 0;
931
932 err:
933         szedata_close(txq->sze);
934         txq->sze = NULL;
935         return -EINVAL;
936 }
937
938 static int
939 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
940 {
941         struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
942
943         if (txq->sze != NULL) {
944                 szedata_close(txq->sze);
945                 txq->sze = NULL;
946         }
947
948         dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
949         return 0;
950 }
951
952 static int
953 eth_dev_start(struct rte_eth_dev *dev)
954 {
955         int ret;
956         uint16_t i;
957         uint16_t nb_rx = dev->data->nb_rx_queues;
958         uint16_t nb_tx = dev->data->nb_tx_queues;
959
960         for (i = 0; i < nb_rx; i++) {
961                 ret = eth_rx_queue_start(dev, i);
962                 if (ret != 0)
963                         goto err_rx;
964         }
965
966         for (i = 0; i < nb_tx; i++) {
967                 ret = eth_tx_queue_start(dev, i);
968                 if (ret != 0)
969                         goto err_tx;
970         }
971
972         return 0;
973
974 err_tx:
975         for (i = 0; i < nb_tx; i++)
976                 eth_tx_queue_stop(dev, i);
977 err_rx:
978         for (i = 0; i < nb_rx; i++)
979                 eth_rx_queue_stop(dev, i);
980         return ret;
981 }
982
983 static void
984 eth_dev_stop(struct rte_eth_dev *dev)
985 {
986         uint16_t i;
987         uint16_t nb_rx = dev->data->nb_rx_queues;
988         uint16_t nb_tx = dev->data->nb_tx_queues;
989
990         for (i = 0; i < nb_tx; i++)
991                 eth_tx_queue_stop(dev, i);
992
993         for (i = 0; i < nb_rx; i++)
994                 eth_rx_queue_stop(dev, i);
995 }
996
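/*
 * Select the RX burst handler according to the DEV_RX_OFFLOAD_SCATTER flag
 * requested at configure time. As an illustration only (not part of this
 * driver), an application would request scattered RX roughly like this:
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */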
997 static int
998 eth_dev_configure(struct rte_eth_dev *dev)
999 {
1000         struct rte_eth_dev_data *data = dev->data;
1001         if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
1002                 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1003                 data->scattered_rx = 1;
1004         } else {
1005                 dev->rx_pkt_burst = eth_szedata2_rx;
1006                 data->scattered_rx = 0;
1007         }
1008         return 0;
1009 }
1010
1011 static void
1012 eth_dev_info(struct rte_eth_dev *dev,
1013                 struct rte_eth_dev_info *dev_info)
1014 {
1015         struct pmd_internals *internals = dev->data->dev_private;
1016
1017         dev_info->if_index = 0;
1018         dev_info->max_mac_addrs = 1;
1019         dev_info->max_rx_pktlen = (uint32_t)-1;
1020         dev_info->max_rx_queues = internals->max_rx_queues;
1021         dev_info->max_tx_queues = internals->max_tx_queues;
1022         dev_info->min_rx_bufsize = 0;
1023         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
1024         dev_info->tx_offload_capa = 0;
1025         dev_info->rx_queue_offload_capa = 0;
1026         dev_info->tx_queue_offload_capa = 0;
1027         dev_info->speed_capa = ETH_LINK_SPEED_100G;
1028 }
1029
1030 static int
1031 eth_stats_get(struct rte_eth_dev *dev,
1032                 struct rte_eth_stats *stats)
1033 {
1034         uint16_t i;
1035         uint16_t nb_rx = dev->data->nb_rx_queues;
1036         uint16_t nb_tx = dev->data->nb_tx_queues;
1037         uint64_t rx_total = 0;
1038         uint64_t tx_total = 0;
1039         uint64_t tx_err_total = 0;
1040         uint64_t rx_total_bytes = 0;
1041         uint64_t tx_total_bytes = 0;
1042
1043         for (i = 0; i < nb_rx; i++) {
1044                 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1045
1046                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1047                         stats->q_ipackets[i] = rxq->rx_pkts;
1048                         stats->q_ibytes[i] = rxq->rx_bytes;
1049                 }
1050                 rx_total += rxq->rx_pkts;
1051                 rx_total_bytes += rxq->rx_bytes;
1052         }
1053
1054         for (i = 0; i < nb_tx; i++) {
1055                 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1056
1057                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1058                         stats->q_opackets[i] = txq->tx_pkts;
1059                         stats->q_obytes[i] = txq->tx_bytes;
1060                         stats->q_errors[i] = txq->err_pkts;
1061                 }
1062                 tx_total += txq->tx_pkts;
1063                 tx_total_bytes += txq->tx_bytes;
1064                 tx_err_total += txq->err_pkts;
1065         }
1066
1067         stats->ipackets = rx_total;
1068         stats->opackets = tx_total;
1069         stats->ibytes = rx_total_bytes;
1070         stats->obytes = tx_total_bytes;
1071         stats->oerrors = tx_err_total;
1072         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1073
1074         return 0;
1075 }
1076
1077 static void
1078 eth_stats_reset(struct rte_eth_dev *dev)
1079 {
1080         uint16_t i;
1081         uint16_t nb_rx = dev->data->nb_rx_queues;
1082         uint16_t nb_tx = dev->data->nb_tx_queues;
1083
1084         for (i = 0; i < nb_rx; i++) {
1085                 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1086                 rxq->rx_pkts = 0;
1087                 rxq->rx_bytes = 0;
1088                 rxq->err_pkts = 0;
1089         }
1090         for (i = 0; i < nb_tx; i++) {
1091                 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1092                 txq->tx_pkts = 0;
1093                 txq->tx_bytes = 0;
1094                 txq->err_pkts = 0;
1095         }
1096 }
1097
1098 static void
1099 eth_rx_queue_release(void *q)
1100 {
1101         struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
1102
1103         if (rxq != NULL) {
1104                 if (rxq->sze != NULL)
1105                         szedata_close(rxq->sze);
1106                 rte_free(rxq);
1107         }
1108 }
1109
1110 static void
1111 eth_tx_queue_release(void *q)
1112 {
1113         struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
1114
1115         if (txq != NULL) {
1116                 if (txq->sze != NULL)
1117                         szedata_close(txq->sze);
1118                 rte_free(txq);
1119         }
1120 }
1121
1122 static void
1123 eth_dev_close(struct rte_eth_dev *dev)
1124 {
1125         uint16_t i;
1126         uint16_t nb_rx = dev->data->nb_rx_queues;
1127         uint16_t nb_tx = dev->data->nb_tx_queues;
1128
1129         eth_dev_stop(dev);
1130
1131         for (i = 0; i < nb_rx; i++) {
1132                 eth_rx_queue_release(dev->data->rx_queues[i]);
1133                 dev->data->rx_queues[i] = NULL;
1134         }
1135         dev->data->nb_rx_queues = 0;
1136         for (i = 0; i < nb_tx; i++) {
1137                 eth_tx_queue_release(dev->data->tx_queues[i]);
1138                 dev->data->tx_queues[i] = NULL;
1139         }
1140         dev->data->nb_tx_queues = 0;
1141 }
1142
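/*
 * The link state is not read from hardware here; the link is always
 * reported as up, 100 Gbps, full duplex.
 */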
1143 static int
1144 eth_link_update(struct rte_eth_dev *dev,
1145                 int wait_to_complete __rte_unused)
1146 {
1147         struct rte_eth_link link;
1148
1149         memset(&link, 0, sizeof(link));
1150
1151         link.link_speed = ETH_SPEED_NUM_100G;
1152         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1153         link.link_status = ETH_LINK_UP;
1154         link.link_autoneg = ETH_LINK_FIXED;
1155
1156         rte_eth_linkstatus_set(dev, &link);
1157         return 0;
1158 }
1159
1160 static int
1161 eth_dev_set_link_up(struct rte_eth_dev *dev __rte_unused)
1162 {
1163         PMD_DRV_LOG(WARNING, "Setting link up is not supported.");
1164         return 0;
1165 }
1166
1167 static int
1168 eth_dev_set_link_down(struct rte_eth_dev *dev __rte_unused)
1169 {
1170         PMD_DRV_LOG(WARNING, "Setting link down is not supported.");
1171         return 0;
1172 }
1173
1174 static int
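/*
 * Allocate an RX queue, open the szedata2 device and subscribe the RX
 * channel whose number equals the queue id.
 */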
1175 eth_rx_queue_setup(struct rte_eth_dev *dev,
1176                 uint16_t rx_queue_id,
1177                 uint16_t nb_rx_desc __rte_unused,
1178                 unsigned int socket_id,
1179                 const struct rte_eth_rxconf *rx_conf __rte_unused,
1180                 struct rte_mempool *mb_pool)
1181 {
1182         struct pmd_internals *internals = dev->data->dev_private;
1183         struct szedata2_rx_queue *rxq;
1184         int ret;
1185         uint32_t rx = 1 << rx_queue_id;
1186         uint32_t tx = 0;
1187
1188         if (dev->data->rx_queues[rx_queue_id] != NULL) {
1189                 eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
1190                 dev->data->rx_queues[rx_queue_id] = NULL;
1191         }
1192
1193         rxq = rte_zmalloc_socket("szedata2 rx queue",
1194                         sizeof(struct szedata2_rx_queue),
1195                         RTE_CACHE_LINE_SIZE, socket_id);
1196         if (rxq == NULL) {
1197                 PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id "
1198                                 "%" PRIu16 "!", rx_queue_id);
1199                 return -ENOMEM;
1200         }
1201
1202         rxq->priv = internals;
1203         rxq->sze = szedata_open(internals->sze_dev);
1204         if (rxq->sze == NULL) {
1205                 PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
1206                                 "%" PRIu16 "!", rx_queue_id);
1207                 eth_rx_queue_release(rxq);
1208                 return -EINVAL;
1209         }
1210         ret = szedata_subscribe3(rxq->sze, &rx, &tx);
1211         if (ret != 0 || rx == 0) {
1212                 PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
1213                                 "%" PRIu16 "!", rx_queue_id);
1214                 eth_rx_queue_release(rxq);
1215                 return -EINVAL;
1216         }
1217         rxq->rx_channel = rx_queue_id;
1218         rxq->in_port = dev->data->port_id;
1219         rxq->mb_pool = mb_pool;
1220         rxq->rx_pkts = 0;
1221         rxq->rx_bytes = 0;
1222         rxq->err_pkts = 0;
1223
1224         dev->data->rx_queues[rx_queue_id] = rxq;
1225
1226         PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
1227                         "%u.", rx_queue_id, socket_id);
1228
1229         return 0;
1230 }
1231
1232 static int
1233 eth_tx_queue_setup(struct rte_eth_dev *dev,
1234                 uint16_t tx_queue_id,
1235                 uint16_t nb_tx_desc __rte_unused,
1236                 unsigned int socket_id,
1237                 const struct rte_eth_txconf *tx_conf __rte_unused)
1238 {
1239         struct pmd_internals *internals = dev->data->dev_private;
1240         struct szedata2_tx_queue *txq;
1241         int ret;
1242         uint32_t rx = 0;
1243         uint32_t tx = 1 << tx_queue_id;
1244
1245         if (dev->data->tx_queues[tx_queue_id] != NULL) {
1246                 eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
1247                 dev->data->tx_queues[tx_queue_id] = NULL;
1248         }
1249
1250         txq = rte_zmalloc_socket("szedata2 tx queue",
1251                         sizeof(struct szedata2_tx_queue),
1252                         RTE_CACHE_LINE_SIZE, socket_id);
1253         if (txq == NULL) {
1254                 PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id "
1255                                 "%" PRIu16 "!", tx_queue_id);
1256                 return -ENOMEM;
1257         }
1258
1259         txq->priv = internals;
1260         txq->sze = szedata_open(internals->sze_dev);
1261         if (txq->sze == NULL) {
1262                 PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
1263                                 "%" PRIu16 "!", tx_queue_id);
1264                 eth_tx_queue_release(txq);
1265                 return -EINVAL;
1266         }
1267         ret = szedata_subscribe3(txq->sze, &rx, &tx);
1268         if (ret != 0 || tx == 0) {
1269                 PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
1270                                 "%" PRIu16 "!", tx_queue_id);
1271                 eth_tx_queue_release(txq);
1272                 return -EINVAL;
1273         }
1274         txq->tx_channel = tx_queue_id;
1275         txq->tx_pkts = 0;
1276         txq->tx_bytes = 0;
1277         txq->err_pkts = 0;
1278
1279         dev->data->tx_queues[tx_queue_id] = txq;
1280
1281         PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
1282                         "%u.", tx_queue_id, socket_id);
1283
1284         return 0;
1285 }
1286
1287 static int
1288 eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
1289                 struct ether_addr *mac_addr __rte_unused)
1290 {
1291         return 0;
1292 }
1293
1294 static void
1295 eth_promiscuous_enable(struct rte_eth_dev *dev __rte_unused)
1296 {
1297         PMD_DRV_LOG(WARNING, "Enabling promiscuous mode is not supported. "
1298                         "The card is always in promiscuous mode.");
1299 }
1300
1301 static void
1302 eth_promiscuous_disable(struct rte_eth_dev *dev __rte_unused)
1303 {
1304         PMD_DRV_LOG(WARNING, "Disabling promiscuous mode is not supported. "
1305                         "The card is always in promiscuous mode.");
1306 }
1307
1308 static void
1309 eth_allmulticast_enable(struct rte_eth_dev *dev __rte_unused)
1310 {
1311         PMD_DRV_LOG(WARNING, "Enabling allmulticast mode is not supported.");
1312 }
1313
1314 static void
1315 eth_allmulticast_disable(struct rte_eth_dev *dev __rte_unused)
1316 {
1317         PMD_DRV_LOG(WARNING, "Disabling allmulticast mode is not supported.");
1318 }
1319
1320 static const struct eth_dev_ops ops = {
1321         .dev_start          = eth_dev_start,
1322         .dev_stop           = eth_dev_stop,
1323         .dev_set_link_up    = eth_dev_set_link_up,
1324         .dev_set_link_down  = eth_dev_set_link_down,
1325         .dev_close          = eth_dev_close,
1326         .dev_configure      = eth_dev_configure,
1327         .dev_infos_get      = eth_dev_info,
1328         .promiscuous_enable   = eth_promiscuous_enable,
1329         .promiscuous_disable  = eth_promiscuous_disable,
1330         .allmulticast_enable  = eth_allmulticast_enable,
1331         .allmulticast_disable = eth_allmulticast_disable,
1332         .rx_queue_start     = eth_rx_queue_start,
1333         .rx_queue_stop      = eth_rx_queue_stop,
1334         .tx_queue_start     = eth_tx_queue_start,
1335         .tx_queue_stop      = eth_tx_queue_stop,
1336         .rx_queue_setup     = eth_rx_queue_setup,
1337         .tx_queue_setup     = eth_tx_queue_setup,
1338         .rx_queue_release   = eth_rx_queue_release,
1339         .tx_queue_release   = eth_tx_queue_release,
1340         .link_update        = eth_link_update,
1341         .stats_get          = eth_stats_get,
1342         .stats_reset        = eth_stats_reset,
1343         .mac_addr_set       = eth_mac_addr_set,
1344 };
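/*
 * Illustrative application-side usage of this PMD through the generic ethdev
 * API (a sketch assuming port_id, socket_id and mb_pool exist in the caller;
 * not part of the driver itself):
 *
 *     struct rte_eth_conf conf = { 0 };
 *     rte_eth_dev_configure(port_id, 1, 1, &conf);         dispatches to eth_dev_configure
 *     rte_eth_rx_queue_setup(port_id, 0, 512, socket_id, NULL, mb_pool);
 *     rte_eth_tx_queue_setup(port_id, 0, 512, socket_id, NULL);
 *     rte_eth_dev_start(port_id);                          dispatches to eth_dev_start
 */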
1345
1346 /*
1347  * Searches sysfs for the index of the szedata2 device file
1348  * (/dev/szedataIIX, where X is the index) matching the given PCI address.
1349  *
1350  * @return
1351  *           0 on success
1352  *          -1 on error
1353  */
1354 static int
1355 get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
1356 {
1357         DIR *dir;
1358         struct dirent *entry;
1359         int ret;
1360         uint32_t tmp_index;
1361         FILE *fd;
1362         char pcislot_path[PATH_MAX];
1363         uint32_t domain;
1364         uint8_t bus;
1365         uint8_t devid;
1366         uint8_t function;
1367
1368         dir = opendir("/sys/class/combo");
1369         if (dir == NULL)
1370                 return -1;
1371
1372         /*
1373          * Iterate through all combosixX directories.
1374          * When the value in the /sys/class/combo/combosixX/device/pcislot
1375          * file matches the PCI address of the probed device, "X" is the
1376          * szedata2 device index we are looking for.
1377          */
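        /*
         * The pcislot file is expected to contain a PCI address in the
         * "domain:bus:devid.function" form parsed by the fscanf() below,
         * e.g. "0000:03:00.0" (illustrative value).
         */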
1378         while ((entry = readdir(dir)) != NULL) {
1379                 ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
1380                 if (ret != 1)
1381                         continue;
1382
1383                 snprintf(pcislot_path, PATH_MAX,
1384                         "/sys/class/combo/combosix%u/device/pcislot",
1385                         tmp_index);
1386
1387                 fd = fopen(pcislot_path, "r");
1388                 if (fd == NULL)
1389                         continue;
1390
1391                 ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
1392                                 &domain, &bus, &devid, &function);
1393                 fclose(fd);
1394                 if (ret != 4)
1395                         continue;
1396
1397                 if (pcislot_addr->domain == domain &&
1398                                 pcislot_addr->bus == bus &&
1399                                 pcislot_addr->devid == devid &&
1400                                 pcislot_addr->function == function) {
1401                         *index = tmp_index;
1402                         closedir(dir);
1403                         return 0;
1404                 }
1405         }
1406
1407         closedir(dir);
1408         return -1;
1409 }
1410
1411 static int
1412 rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
1413 {
1414         struct rte_eth_dev_data *data = dev->data;
1415         struct pmd_internals *internals = (struct pmd_internals *)
1416                 data->dev_private;
1417         struct szedata *szedata_temp;
1418         int ret;
1419         uint32_t szedata2_index;
1420         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1421         struct rte_pci_addr *pci_addr = &pci_dev->addr;
1422         struct rte_mem_resource *pci_rsc =
1423                 &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
1424         char rsc_filename[PATH_MAX];
1425         void *pci_resource_ptr = NULL;
1426         int fd;
1427
1428         PMD_INIT_LOG(INFO, "Initializing szedata2 device (" PCI_PRI_FMT ")",
1429                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1430                         pci_addr->function);
1431
1432         internals->dev = dev;
1433
1434         /* Get index of szedata2 device file and create path to device file */
1435         ret = get_szedata2_index(pci_addr, &szedata2_index);
1436         if (ret != 0) {
1437                 PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
1438                 return -ENODEV;
1439         }
1440         snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
1441                         szedata2_index);
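        /* The resulting path looks like "/dev/szedataII0" (illustrative index). */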
1442
1443         PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev);
1444
1445         /*
1446          * Get the number of available DMA RX and TX channels, which is the
1447          * maximum number of queues that can be created, and store it in the
1448          * private device data structure.
1449          */
1450         szedata_temp = szedata_open(internals->sze_dev);
1451         if (szedata_temp == NULL) {
1452                 PMD_INIT_LOG(ERR, "szedata_open(): failed to open %s",
1453                                 internals->sze_dev);
1454                 return -EINVAL;
1455         }
1456         internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
1457                         SZE2_DIR_RX);
1458         internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
1459                         SZE2_DIR_TX);
1460         szedata_close(szedata_temp);
1461
1462         PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u",
1463                         internals->max_rx_queues, internals->max_tx_queues);
1464
1465         /* Set rx, tx burst functions */
1466         if (data->scattered_rx == 1)
1467                 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1468         else
1469                 dev->rx_pkt_burst = eth_szedata2_rx;
1470         dev->tx_pkt_burst = eth_szedata2_tx;
1471
1472         /* Set function callbacks for Ethernet API */
1473         dev->dev_ops = &ops;
1474
1475         rte_eth_copy_pci_info(dev, pci_dev);
1476
1477         /* mmap the PCI resource file into the rte_mem_resource structure */
1478         if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr == 0) {
1480                 PMD_INIT_LOG(ERR, "Missing resource%u file",
1481                                 PCI_RESOURCE_NUMBER);
1482                 return -EINVAL;
1483         }
1484         snprintf(rsc_filename, PATH_MAX,
1485                 "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
1486                 pci_addr->domain, pci_addr->bus,
1487                 pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
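        /*
         * rsc_filename typically resolves to something like
         * "/sys/bus/pci/devices/0000:03:00.0/resource0" (illustrative path;
         * the prefix comes from rte_pci_get_sysfs_path() and the trailing
         * number from PCI_RESOURCE_NUMBER).
         */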
1488         fd = open(rsc_filename, O_RDWR);
1489         if (fd < 0) {
1490                 PMD_INIT_LOG(ERR, "Could not open file %s", rsc_filename);
1491                 return -EINVAL;
1492         }
1493
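        /*
         * Map the whole resource file read/write. Per POSIX, the mapping
         * stays valid after the file descriptor is closed, so fd can be
         * released immediately after mmap().
         */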
1494         pci_resource_ptr = mmap(NULL,
1495                         pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
1496                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1497         close(fd);
1498         if (pci_resource_ptr == MAP_FAILED) {
1499                 PMD_INIT_LOG(ERR, "Could not mmap file %s (fd = %d)",
1500                                 rsc_filename, fd);
1501                 return -EINVAL;
1502         }
1503         pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
1504         internals->pci_rsc = pci_rsc;
1505
1506         PMD_INIT_LOG(DEBUG, "resource%u phys_addr = 0x%llx len = %llu "
1507                         "virt addr = 0x%llx", PCI_RESOURCE_NUMBER,
1508                         (unsigned long long)pci_rsc->phys_addr,
1509                         (unsigned long long)pci_rsc->len,
1510                         (unsigned long long)pci_rsc->addr);
1511
1512         /* Get link state */
1513         eth_link_update(dev, 0);
1514
1515         /* Allocate space for one mac address */
1516         data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
1517                         RTE_CACHE_LINE_SIZE);
1518         if (data->mac_addrs == NULL) {
1519                 PMD_INIT_LOG(ERR, "Could not allocate memory for MAC address!");
1520                 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1521                        pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1522                 return -EINVAL;
1523         }
1524
1525         ether_addr_copy(&eth_addr, data->mac_addrs);
1526
1527         PMD_INIT_LOG(INFO, "szedata2 device ("
1528                         PCI_PRI_FMT ") successfully initialized",
1529                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1530                         pci_addr->function);
1531
1532         return 0;
1533 }
1534
1535 static int
1536 rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
1537 {
1538         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1539         struct rte_pci_addr *pci_addr = &pci_dev->addr;
1540
1541         rte_free(dev->data->mac_addrs);
1542         dev->data->mac_addrs = NULL;
1543         munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1544                pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1545
1546         PMD_DRV_LOG(INFO, "szedata2 device ("
1547                         PCI_PRI_FMT ") successfully uninitialized",
1548                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1549                         pci_addr->function);
1550
1551         return 0;
1552 }
1553
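/* PCI IDs of the NETCOPE COMBO-80G/100G cards supported by this PMD. */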
1554 static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
1555         {
1556                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1557                                 PCI_DEVICE_ID_NETCOPE_COMBO80G)
1558         },
1559         {
1560                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1561                                 PCI_DEVICE_ID_NETCOPE_COMBO100G)
1562         },
1563         {
1564                 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1565                                 PCI_DEVICE_ID_NETCOPE_COMBO100G2)
1566         },
1567         {
1568                 .vendor_id = 0,
1569         }
1570 };
1571
1572 static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1573         struct rte_pci_device *pci_dev)
1574 {
1575         return rte_eth_dev_pci_generic_probe(pci_dev,
1576                 sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
1577 }
1578
1579 static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
1580 {
1581         return rte_eth_dev_pci_generic_remove(pci_dev,
1582                 rte_szedata2_eth_dev_uninit);
1583 }
1584
1585 static struct rte_pci_driver szedata2_eth_driver = {
1586         .id_table = rte_szedata2_pci_id_table,
1587         .probe = szedata2_eth_pci_probe,
1588         .remove = szedata2_eth_pci_remove,
1589 };
1590
1591 RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
1592 RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
1593 RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
1594         "* combo6core & combov3 & szedata2 & szedata2_cv3");
1595
1596 RTE_INIT(szedata2_init_log);
1597 static void
1598 szedata2_init_log(void)
1599 {
1600         szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
1601         if (szedata2_logtype_init >= 0)
1602                 rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
1603         szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
1604         if (szedata2_logtype_driver >= 0)
1605                 rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);
1606 }