ethdev: return diagnostic when setting MAC address
[dpdk.git] / drivers / net / szedata2 / rte_eth_szedata2.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015 - 2016 CESNET
3  */
4
5 #include <stdint.h>
6 #include <unistd.h>
7 #include <stdbool.h>
8 #include <err.h>
9 #include <sys/types.h>
10 #include <dirent.h>
11 #include <sys/stat.h>
12 #include <fcntl.h>
13 #include <sys/mman.h>
14
15 #include <libsze2.h>
16
17 #include <rte_mbuf.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_malloc.h>
21 #include <rte_memcpy.h>
22 #include <rte_kvargs.h>
23 #include <rte_dev.h>
24
25 #include "rte_eth_szedata2.h"
26 #include "szedata2_logs.h"
27 #include "szedata2_iobuf.h"
28
/* Upper bounds on RX/TX queue (DMA channel) counts per device. */
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
/* Size of the TX buffer area requested per szedata_tx_lock_data() call. */
#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)

/**
 * size of szedata2_packet header with alignment
 */
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8

#define RTE_SZEDATA2_DRIVER_NAME net_szedata2

/* printf-style format string for the szedata2 character device path. */
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
/* Per-device private data of the szedata2 PMD. */
struct pmd_internals {
	struct rte_eth_dev *dev;  /* back-pointer to the owning ethdev */
	uint16_t max_rx_queues;   /* RX channel count - presumably bounded by
				   * RTE_ETH_SZEDATA2_MAX_RX_QUEUES; confirm */
	uint16_t max_tx_queues;   /* TX channel count (see note above) */
	char sze_dev[PATH_MAX];   /* device node path, built from
				   * SZEDATA2_DEV_PATH_FMT - TODO confirm */
	struct rte_mem_resource *pci_rsc; /* mapped PCI memory resource */
};
49
/* Per-RX-queue state; each queue maps to one szedata2 RX DMA channel. */
struct szedata2_rx_queue {
	struct pmd_internals *priv;  /* owning device's private data */
	struct szedata *sze;         /* libsze2 handle used for locking data */
	uint8_t rx_channel;          /* hardware RX channel index */
	uint16_t in_port;            /* port id stamped into mbuf->port */
	struct rte_mempool *mb_pool; /* mempool for RX mbuf allocation */
	volatile uint64_t rx_pkts;   /* received packet counter */
	volatile uint64_t rx_bytes;  /* received byte counter */
	volatile uint64_t err_pkts;  /* error packet counter */
};
60
/* Per-TX-queue state; each queue maps to one szedata2 TX DMA channel. */
struct szedata2_tx_queue {
	struct pmd_internals *priv;  /* owning device's private data */
	struct szedata *sze;         /* libsze2 handle used for locking data */
	uint8_t tx_channel;          /* hardware TX channel index */
	volatile uint64_t tx_pkts;   /* transmitted packet counter */
	volatile uint64_t tx_bytes;  /* transmitted byte counter */
	volatile uint64_t err_pkts;  /* error packet counter */
};
69
/* Dynamic logtype ids for this PMD's init- and driver-level log macros
 * (registration not visible in this chunk - see szedata2_logs.h). */
int szedata2_logtype_init;
int szedata2_logtype_driver;

/* Default MAC address reported for the port; 00:11:17 is presumably the
 * vendor OUI - confirm against hardware documentation. */
static struct ether_addr eth_addr = {
	.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};
76
/**
 * Burst RX handler for non-scattered (single-mbuf) mode.
 *
 * Copies up to @nb_pkts packets from the szedata2 channel bound to
 * @queue into newly allocated mbufs.  Each hardware segment carries an
 * 8-byte header (2B segment size, 2B HW-metadata size, little-endian)
 * followed by optional HW data and the packet payload.  A packet that
 * does not fit into one mbuf's data room is dropped with an error log
 * (scattered mode is handled by eth_szedata2_rx_scattered()).
 *
 * @param queue   RX queue (struct szedata2_rx_queue *).
 * @param bufs    Output array to fill with received mbufs.
 * @param nb_pkts Maximum number of packets to receive.
 * @return Number of mbufs stored in @bufs.
 */
static uint16_t
eth_szedata2_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct szedata2_rx_queue *sze_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint16_t sg_size;
	uint16_t hw_size;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	struct szedata *sze = sze_q->sze;
	uint8_t *header_ptr = NULL; /* header of packet */
	uint8_t *packet_ptr1 = NULL;
	uint8_t *packet_ptr2 = NULL;
	uint16_t packet_len1 = 0;
	uint16_t packet_len2 = 0;
	uint16_t hw_data_align;

	if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from szedata2 channel given
	 * by queue and copies the packet data into a newly allocated mbuf
	 * to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);

		if (unlikely(mbuf == NULL)) {
			/* mempool exhausted - account and end the burst */
			sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		/* get the next sze packet */
		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
				sze->ct_rx_lck->next == NULL) {
			/* unlock old data */
			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
			sze->ct_rx_lck_orig = NULL;
			sze->ct_rx_lck = NULL;
		}

		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
			/* nothing to read, lock new data */
			sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
			sze->ct_rx_lck_orig = sze->ct_rx_lck;

			if (sze->ct_rx_lck == NULL) {
				/* nothing to lock */
				rte_pktmbuf_free(mbuf);
				break;
			}

			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;

			if (!sze->ct_rx_rem_bytes) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
			/*
			 * cut in header
			 * copy parts of header to merge buffer
			 */
			if (sze->ct_rx_lck->next == NULL) {
				rte_pktmbuf_free(mbuf);
				break;
			}

			/* copy first part of header */
			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
					sze->ct_rx_rem_bytes);

			/* copy second part of header */
			sze->ct_rx_lck = sze->ct_rx_lck->next;
			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
				sze->ct_rx_cur_ptr,
				RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes);

			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
				RTE_SZE2_PACKET_HEADER_SIZE +
				sze->ct_rx_rem_bytes;

			header_ptr = (uint8_t *)sze->ct_rx_buffer;
		} else {
			/* not cut */
			header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
		}

		/* first 2B = segment size, next 2B = HW data size, both LE */
		sg_size = le16toh(*((uint16_t *)header_ptr));
		hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
		packet_size = sg_size -
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);


		/* checks if packet all right */
		/* NOTE(review): errx() terminates the whole process on
		 * malformed device data - harsh for a datapath; consider
		 * counting err_pkts and dropping instead. */
		if (!sg_size)
			errx(5, "Zero segsize");

		/* check sg_size and hwsize */
		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
			errx(10, "Hwsize bigger than expected. Segsize: %d, "
				"hwsize: %d", sg_size, hw_size);
		}

		/* bytes of HW metadata (incl. padding) preceding payload */
		hw_data_align =
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
			RTE_SZE2_PACKET_HEADER_SIZE;

		if (sze->ct_rx_rem_bytes >=
				(uint16_t)(sg_size -
				RTE_SZE2_PACKET_HEADER_SIZE)) {
			/* no cut */
			/* one packet ready - go to another */
			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
			packet_len1 = packet_size;
			packet_ptr2 = NULL;
			packet_len2 = 0;

			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
		} else {
			/* cut in data */
			if (sze->ct_rx_lck->next == NULL) {
				errx(6, "Need \"next\" lock, "
					"but it is missing: %u",
					sze->ct_rx_rem_bytes);
			}

			/* skip hw data */
			if (sze->ct_rx_rem_bytes <= hw_data_align) {
				uint16_t rem_size = hw_data_align -
					sze->ct_rx_rem_bytes;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr =
					(void *)(((uint8_t *)
					(sze->ct_rx_lck->start)) + rem_size);

				packet_ptr1 = sze->ct_rx_cur_ptr;
				packet_len1 = packet_size;
				packet_ptr2 = NULL;
				packet_len2 = 0;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size);
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					rem_size - RTE_SZE2_ALIGN8(packet_size);
			} else {
				/* get pointer and length from first part */
				packet_ptr1 = sze->ct_rx_cur_ptr +
					hw_data_align;
				packet_len1 = sze->ct_rx_rem_bytes -
					hw_data_align;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;

				/* get pointer and length from second part */
				packet_ptr2 = sze->ct_rx_cur_ptr;
				packet_len2 = packet_size - packet_len1;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size) -
					packet_len1;
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					(RTE_SZE2_ALIGN8(packet_size) -
					 packet_len1);
			}
		}

		if (unlikely(packet_ptr1 == NULL)) {
			rte_pktmbuf_free(mbuf);
			break;
		}

		/* get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (packet_size <= buf_size) {
			/* sze packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
					packet_ptr1, packet_len1);
			if (packet_ptr2 != NULL) {
				rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
					uint8_t *) + packet_len1),
					packet_ptr2, packet_len2);
			}
			mbuf->data_len = (uint16_t)packet_size;

			mbuf->pkt_len = packet_size;
			mbuf->port = sze_q->in_port;
			bufs[num_rx] = mbuf;
			num_rx++;
			num_bytes += packet_size;
		} else {
			/*
			 * sze packet will not fit in one mbuf,
			 * scattered mode is not enabled, drop packet
			 */
			PMD_DRV_LOG(ERR,
				"SZE segment %d bytes will not fit in one mbuf "
				"(%d bytes), scattered mode is not enabled, "
				"drop packet!!",
				packet_size, buf_size);
			rte_pktmbuf_free(mbuf);
		}
	}

	/* update queue statistics */
	sze_q->rx_pkts += num_rx;
	sze_q->rx_bytes += num_bytes;
	return num_rx;
}
311
312 static uint16_t
313 eth_szedata2_rx_scattered(void *queue,
314                 struct rte_mbuf **bufs,
315                 uint16_t nb_pkts)
316 {
317         unsigned int i;
318         struct rte_mbuf *mbuf;
319         struct szedata2_rx_queue *sze_q = queue;
320         struct rte_pktmbuf_pool_private *mbp_priv;
321         uint16_t num_rx = 0;
322         uint16_t buf_size;
323         uint16_t sg_size;
324         uint16_t hw_size;
325         uint16_t packet_size;
326         uint64_t num_bytes = 0;
327         struct szedata *sze = sze_q->sze;
328         uint8_t *header_ptr = NULL; /* header of packet */
329         uint8_t *packet_ptr1 = NULL;
330         uint8_t *packet_ptr2 = NULL;
331         uint16_t packet_len1 = 0;
332         uint16_t packet_len2 = 0;
333         uint16_t hw_data_align;
334         uint64_t *mbuf_failed_ptr =
335                 &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
336
337         if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
338                 return 0;
339
340         /*
341          * Reads the given number of packets from szedata2 channel given
342          * by queue and copies the packet data into a newly allocated mbuf
343          * to return.
344          */
345         for (i = 0; i < nb_pkts; i++) {
346                 const struct szedata_lock *ct_rx_lck_backup;
347                 unsigned int ct_rx_rem_bytes_backup;
348                 unsigned char *ct_rx_cur_ptr_backup;
349
350                 /* get the next sze packet */
351                 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
352                                 sze->ct_rx_lck->next == NULL) {
353                         /* unlock old data */
354                         szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
355                         sze->ct_rx_lck_orig = NULL;
356                         sze->ct_rx_lck = NULL;
357                 }
358
359                 /*
360                  * Store items from sze structure which can be changed
361                  * before mbuf allocating. Use these items in case of mbuf
362                  * allocating failure.
363                  */
364                 ct_rx_lck_backup = sze->ct_rx_lck;
365                 ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
366                 ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
367
368                 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
369                         /* nothing to read, lock new data */
370                         sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
371                         sze->ct_rx_lck_orig = sze->ct_rx_lck;
372
373                         /*
374                          * Backup items from sze structure must be updated
375                          * after locking to contain pointers to new locks.
376                          */
377                         ct_rx_lck_backup = sze->ct_rx_lck;
378                         ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
379                         ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
380
381                         if (sze->ct_rx_lck == NULL)
382                                 /* nothing to lock */
383                                 break;
384
385                         sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
386                         sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
387
388                         if (!sze->ct_rx_rem_bytes)
389                                 break;
390                 }
391
392                 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
393                         /*
394                          * cut in header - copy parts of header to merge buffer
395                          */
396                         if (sze->ct_rx_lck->next == NULL)
397                                 break;
398
399                         /* copy first part of header */
400                         rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
401                                         sze->ct_rx_rem_bytes);
402
403                         /* copy second part of header */
404                         sze->ct_rx_lck = sze->ct_rx_lck->next;
405                         sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
406                         rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
407                                 sze->ct_rx_cur_ptr,
408                                 RTE_SZE2_PACKET_HEADER_SIZE -
409                                 sze->ct_rx_rem_bytes);
410
411                         sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
412                                 sze->ct_rx_rem_bytes;
413                         sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
414                                 RTE_SZE2_PACKET_HEADER_SIZE +
415                                 sze->ct_rx_rem_bytes;
416
417                         header_ptr = (uint8_t *)sze->ct_rx_buffer;
418                 } else {
419                         /* not cut */
420                         header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
421                         sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
422                         sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
423                 }
424
425                 sg_size = le16toh(*((uint16_t *)header_ptr));
426                 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
427                 packet_size = sg_size -
428                         RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
429
430
431                 /* checks if packet all right */
432                 if (!sg_size)
433                         errx(5, "Zero segsize");
434
435                 /* check sg_size and hwsize */
436                 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
437                         errx(10, "Hwsize bigger than expected. Segsize: %d, "
438                                         "hwsize: %d", sg_size, hw_size);
439                 }
440
441                 hw_data_align =
442                         RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
443                         hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;
444
445                 if (sze->ct_rx_rem_bytes >=
446                                 (uint16_t)(sg_size -
447                                 RTE_SZE2_PACKET_HEADER_SIZE)) {
448                         /* no cut */
449                         /* one packet ready - go to another */
450                         packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
451                         packet_len1 = packet_size;
452                         packet_ptr2 = NULL;
453                         packet_len2 = 0;
454
455                         sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
456                                 RTE_SZE2_PACKET_HEADER_SIZE;
457                         sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
458                                 RTE_SZE2_PACKET_HEADER_SIZE;
459                 } else {
460                         /* cut in data */
461                         if (sze->ct_rx_lck->next == NULL) {
462                                 errx(6, "Need \"next\" lock, but it is "
463                                         "missing: %u", sze->ct_rx_rem_bytes);
464                         }
465
466                         /* skip hw data */
467                         if (sze->ct_rx_rem_bytes <= hw_data_align) {
468                                 uint16_t rem_size = hw_data_align -
469                                         sze->ct_rx_rem_bytes;
470
471                                 /* MOVE to next lock */
472                                 sze->ct_rx_lck = sze->ct_rx_lck->next;
473                                 sze->ct_rx_cur_ptr =
474                                         (void *)(((uint8_t *)
475                                         (sze->ct_rx_lck->start)) + rem_size);
476
477                                 packet_ptr1 = sze->ct_rx_cur_ptr;
478                                 packet_len1 = packet_size;
479                                 packet_ptr2 = NULL;
480                                 packet_len2 = 0;
481
482                                 sze->ct_rx_cur_ptr +=
483                                         RTE_SZE2_ALIGN8(packet_size);
484                                 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
485                                         rem_size - RTE_SZE2_ALIGN8(packet_size);
486                         } else {
487                                 /* get pointer and length from first part */
488                                 packet_ptr1 = sze->ct_rx_cur_ptr +
489                                         hw_data_align;
490                                 packet_len1 = sze->ct_rx_rem_bytes -
491                                         hw_data_align;
492
493                                 /* MOVE to next lock */
494                                 sze->ct_rx_lck = sze->ct_rx_lck->next;
495                                 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
496
497                                 /* get pointer and length from second part */
498                                 packet_ptr2 = sze->ct_rx_cur_ptr;
499                                 packet_len2 = packet_size - packet_len1;
500
501                                 sze->ct_rx_cur_ptr +=
502                                         RTE_SZE2_ALIGN8(packet_size) -
503                                         packet_len1;
504                                 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
505                                         (RTE_SZE2_ALIGN8(packet_size) -
506                                          packet_len1);
507                         }
508                 }
509
510                 if (unlikely(packet_ptr1 == NULL))
511                         break;
512
513                 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
514
515                 if (unlikely(mbuf == NULL)) {
516                         /*
517                          * Restore items from sze structure to state after
518                          * unlocking (eventually locking).
519                          */
520                         sze->ct_rx_lck = ct_rx_lck_backup;
521                         sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
522                         sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
523                         sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
524                         break;
525                 }
526
527                 /* get the space available for data in the mbuf */
528                 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
529                 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
530                                 RTE_PKTMBUF_HEADROOM);
531
532                 if (packet_size <= buf_size) {
533                         /* sze packet will fit in one mbuf, go ahead and copy */
534                         rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
535                                         packet_ptr1, packet_len1);
536                         if (packet_ptr2 != NULL) {
537                                 rte_memcpy((void *)
538                                         (rte_pktmbuf_mtod(mbuf, uint8_t *) +
539                                         packet_len1), packet_ptr2, packet_len2);
540                         }
541                         mbuf->data_len = (uint16_t)packet_size;
542                 } else {
543                         /*
544                          * sze packet will not fit in one mbuf,
545                          * scatter packet into more mbufs
546                          */
547                         struct rte_mbuf *m = mbuf;
548                         uint16_t len = rte_pktmbuf_tailroom(mbuf);
549
550                         /* copy first part of packet */
551                         /* fill first mbuf */
552                         rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
553                                 len);
554                         packet_len1 -= len;
555                         packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
556
557                         while (packet_len1 > 0) {
558                                 /* fill new mbufs */
559                                 m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
560
561                                 if (unlikely(m->next == NULL)) {
562                                         rte_pktmbuf_free(mbuf);
563                                         /*
564                                          * Restore items from sze structure
565                                          * to state after unlocking (eventually
566                                          * locking).
567                                          */
568                                         sze->ct_rx_lck = ct_rx_lck_backup;
569                                         sze->ct_rx_rem_bytes =
570                                                 ct_rx_rem_bytes_backup;
571                                         sze->ct_rx_cur_ptr =
572                                                 ct_rx_cur_ptr_backup;
573                                         (*mbuf_failed_ptr)++;
574                                         goto finish;
575                                 }
576
577                                 m = m->next;
578
579                                 len = RTE_MIN(rte_pktmbuf_tailroom(m),
580                                         packet_len1);
581                                 rte_memcpy(rte_pktmbuf_append(mbuf, len),
582                                         packet_ptr1, len);
583
584                                 (mbuf->nb_segs)++;
585                                 packet_len1 -= len;
586                                 packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
587                         }
588
589                         if (packet_ptr2 != NULL) {
590                                 /* copy second part of packet, if exists */
591                                 /* fill the rest of currently last mbuf */
592                                 len = rte_pktmbuf_tailroom(m);
593                                 rte_memcpy(rte_pktmbuf_append(mbuf, len),
594                                         packet_ptr2, len);
595                                 packet_len2 -= len;
596                                 packet_ptr2 = ((uint8_t *)packet_ptr2) + len;
597
598                                 while (packet_len2 > 0) {
599                                         /* fill new mbufs */
600                                         m->next = rte_pktmbuf_alloc(
601                                                         sze_q->mb_pool);
602
603                                         if (unlikely(m->next == NULL)) {
604                                                 rte_pktmbuf_free(mbuf);
605                                                 /*
606                                                  * Restore items from sze
607                                                  * structure to state after
608                                                  * unlocking (eventually
609                                                  * locking).
610                                                  */
611                                                 sze->ct_rx_lck =
612                                                         ct_rx_lck_backup;
613                                                 sze->ct_rx_rem_bytes =
614                                                         ct_rx_rem_bytes_backup;
615                                                 sze->ct_rx_cur_ptr =
616                                                         ct_rx_cur_ptr_backup;
617                                                 (*mbuf_failed_ptr)++;
618                                                 goto finish;
619                                         }
620
621                                         m = m->next;
622
623                                         len = RTE_MIN(rte_pktmbuf_tailroom(m),
624                                                 packet_len2);
625                                         rte_memcpy(
626                                                 rte_pktmbuf_append(mbuf, len),
627                                                 packet_ptr2, len);
628
629                                         (mbuf->nb_segs)++;
630                                         packet_len2 -= len;
631                                         packet_ptr2 = ((uint8_t *)packet_ptr2) +
632                                                 len;
633                                 }
634                         }
635                 }
636                 mbuf->pkt_len = packet_size;
637                 mbuf->port = sze_q->in_port;
638                 bufs[num_rx] = mbuf;
639                 num_rx++;
640                 num_bytes += packet_size;
641         }
642
643 finish:
644         sze_q->rx_pkts += num_rx;
645         sze_q->rx_bytes += num_bytes;
646         return num_rx;
647 }
648
/**
 * Burst transmit on a szedata2 TX DMA channel.
 *
 * Each packet is written into a region of the DMA ring locked via
 * szedata_tx_lock_data(). A packet consists of an 8-byte hardware header
 * (first two little-endian bytes = header + payload length, next two = 0)
 * followed by the payload, padded to an 8-byte boundary. A locked region
 * may consist of two areas (lck and lck->next) when it wraps the ring;
 * a packet crossing the boundary is split between them.
 *
 * @param queue   TX queue (struct szedata2_tx_queue *).
 * @param bufs    Array of mbufs to transmit; successfully sent mbufs
 *                are freed here.
 * @param nb_pkts Number of mbufs in bufs.
 * @return Number of packets actually written to the ring.
 */
static uint16_t
eth_szedata2_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	struct szedata2_tx_queue *sze_q = queue;
	uint16_t num_tx = 0;
	uint64_t num_bytes = 0;

	const struct szedata_lock *lck;
	uint32_t lock_size;
	uint32_t lock_size2;
	void *dst;
	uint32_t pkt_len;
	uint32_t hwpkt_len;
	uint32_t unlock_size;
	uint32_t rem_len;
	uint16_t mbuf_segs;
	uint16_t pkt_left = nb_pkts;

	if (sze_q->sze == NULL || nb_pkts == 0)
		return 0;

	while (pkt_left > 0) {
		unlock_size = 0;
		/* Busy-wait: retry the lock until some ring space is held. */
		lck = szedata_tx_lock_data(sze_q->sze,
			RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
			sze_q->tx_channel);
		if (lck == NULL)
			continue;

		dst = lck->start;
		lock_size = lck->len;
		/* Second area is present only when the lock wraps the ring. */
		lock_size2 = lck->next ? lck->next->len : 0;

next_packet:
		mbuf = bufs[nb_pkts - pkt_left];

		pkt_len = mbuf->pkt_len;
		mbuf_segs = mbuf->nb_segs;

		/* On-wire size: 8B header + payload rounded up to 8B. */
		hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
			RTE_SZE2_ALIGN8(pkt_len);

		/*
		 * Not enough locked space even across both areas: release
		 * what was written so far and retry with a fresh lock.
		 */
		if (lock_size + lock_size2 < hwpkt_len) {
			szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
			continue;
		}

		num_bytes += pkt_len;

		if (lock_size > hwpkt_len) {
			/* Whole packet fits into the first area. */
			void *tmp_dst;

			rem_len = 0;

			/* write packet length at first 2 bytes in 8B header */
			*((uint16_t *)dst) = htole16(
					RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
					pkt_len);
			*(((uint16_t *)dst) + 1) = htole16(0);

			/* copy packet from mbuf */
			tmp_dst = ((uint8_t *)(dst)) +
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
			if (mbuf_segs == 1) {
				/*
				 * non-scattered packet,
				 * transmit from one mbuf
				 */
				rte_memcpy(tmp_dst,
					rte_pktmbuf_mtod(mbuf, const void *),
					pkt_len);
			} else {
				/* scattered packet, transmit from more mbufs */
				struct rte_mbuf *m = mbuf;
				while (m) {
					rte_memcpy(tmp_dst,
						rte_pktmbuf_mtod(m,
						const void *),
						m->data_len);
					tmp_dst = ((uint8_t *)(tmp_dst)) +
						m->data_len;
					m = m->next;
				}
			}


			dst = ((uint8_t *)dst) + hwpkt_len;
			unlock_size += hwpkt_len;
			lock_size -= hwpkt_len;

			rte_pktmbuf_free(mbuf);
			num_tx++;
			pkt_left--;
			if (pkt_left == 0) {
				szedata_tx_unlock_data(sze_q->sze, lck,
					unlock_size);
				break;
			}
			/* Keep filling the same lock with the next packet. */
			goto next_packet;
		} else if (lock_size + lock_size2 >= hwpkt_len) {
			/*
			 * Packet spans the end of the first area (always true
			 * here given the size check above): copy write_len
			 * bytes into the first area and the rest into the
			 * second one.
			 */
			void *tmp_dst;
			uint16_t write_len;

			/* write packet length at first 2 bytes in 8B header */
			*((uint16_t *)dst) =
				htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
					pkt_len);
			*(((uint16_t *)dst) + 1) = htole16(0);

			/*
			 * If the raw packet (pkt_len) is smaller than lock_size
			 * get the correct length for memcpy
			 */
			write_len =
				pkt_len < lock_size -
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
				pkt_len :
				lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;

			rem_len = hwpkt_len - lock_size;

			tmp_dst = ((uint8_t *)(dst)) +
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
			if (mbuf_segs == 1) {
				/*
				 * non-scattered packet,
				 * transmit from one mbuf
				 */
				/* copy part of packet to first area */
				rte_memcpy(tmp_dst,
					rte_pktmbuf_mtod(mbuf, const void *),
					write_len);

				if (lck->next)
					dst = lck->next->start;

				/* copy part of packet to second area */
				rte_memcpy(dst,
					(const void *)(rte_pktmbuf_mtod(mbuf,
							const uint8_t *) +
					write_len), pkt_len - write_len);
			} else {
				/* scattered packet, transmit from more mbufs */
				struct rte_mbuf *m = mbuf;
				uint16_t written = 0;
				uint16_t to_write = 0;
				bool new_mbuf = true;
				uint16_t write_off = 0;

				/* copy part of packet to first area */
				while (m && written < write_len) {
					to_write = RTE_MIN(m->data_len,
							write_len - written);
					rte_memcpy(tmp_dst,
						rte_pktmbuf_mtod(m,
							const void *),
						to_write);

					tmp_dst = ((uint8_t *)(tmp_dst)) +
						to_write;
					/*
					 * Advance to the next mbuf only when
					 * the current one was fully consumed;
					 * otherwise remember the offset for
					 * the second-area copy below.
					 */
					if (m->data_len <= write_len -
							written) {
						m = m->next;
						new_mbuf = true;
					} else {
						new_mbuf = false;
					}
					written += to_write;
				}

				if (lck->next)
					dst = lck->next->start;

				tmp_dst = dst;
				written = 0;
				write_off = new_mbuf ? 0 : to_write;

				/* copy part of packet to second area */
				while (m && written < pkt_len - write_len) {
					rte_memcpy(tmp_dst, (const void *)
						(rte_pktmbuf_mtod(m,
						uint8_t *) + write_off),
						m->data_len - write_off);

					tmp_dst = ((uint8_t *)(tmp_dst)) +
						(m->data_len - write_off);
					written += m->data_len - write_off;
					m = m->next;
					write_off = 0;
				}
			}

			/* Continue in the second area past the split packet. */
			dst = ((uint8_t *)dst) + rem_len;
			unlock_size += hwpkt_len;
			lock_size = lock_size2 - rem_len;
			lock_size2 = 0;

			rte_pktmbuf_free(mbuf);
			num_tx++;
		}

		szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
		pkt_left--;
	}

	sze_q->tx_pkts += num_tx;
	sze_q->err_pkts += nb_pkts - num_tx;
	sze_q->tx_bytes += num_bytes;
	return num_tx;
}
862
863 static int
864 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
865 {
866         struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
867         int ret;
868         struct pmd_internals *internals = (struct pmd_internals *)
869                 dev->data->dev_private;
870
871         if (rxq->sze == NULL) {
872                 uint32_t rx = 1 << rxq->rx_channel;
873                 uint32_t tx = 0;
874                 rxq->sze = szedata_open(internals->sze_dev);
875                 if (rxq->sze == NULL)
876                         return -EINVAL;
877                 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
878                 if (ret != 0 || rx == 0)
879                         goto err;
880         }
881
882         ret = szedata_start(rxq->sze);
883         if (ret != 0)
884                 goto err;
885         dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
886         return 0;
887
888 err:
889         szedata_close(rxq->sze);
890         rxq->sze = NULL;
891         return -EINVAL;
892 }
893
894 static int
895 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
896 {
897         struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
898
899         if (rxq->sze != NULL) {
900                 szedata_close(rxq->sze);
901                 rxq->sze = NULL;
902         }
903
904         dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
905         return 0;
906 }
907
908 static int
909 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
910 {
911         struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
912         int ret;
913         struct pmd_internals *internals = (struct pmd_internals *)
914                 dev->data->dev_private;
915
916         if (txq->sze == NULL) {
917                 uint32_t rx = 0;
918                 uint32_t tx = 1 << txq->tx_channel;
919                 txq->sze = szedata_open(internals->sze_dev);
920                 if (txq->sze == NULL)
921                         return -EINVAL;
922                 ret = szedata_subscribe3(txq->sze, &rx, &tx);
923                 if (ret != 0 || tx == 0)
924                         goto err;
925         }
926
927         ret = szedata_start(txq->sze);
928         if (ret != 0)
929                 goto err;
930         dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
931         return 0;
932
933 err:
934         szedata_close(txq->sze);
935         txq->sze = NULL;
936         return -EINVAL;
937 }
938
939 static int
940 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
941 {
942         struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
943
944         if (txq->sze != NULL) {
945                 szedata_close(txq->sze);
946                 txq->sze = NULL;
947         }
948
949         dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
950         return 0;
951 }
952
953 static int
954 eth_dev_start(struct rte_eth_dev *dev)
955 {
956         int ret;
957         uint16_t i;
958         uint16_t nb_rx = dev->data->nb_rx_queues;
959         uint16_t nb_tx = dev->data->nb_tx_queues;
960
961         for (i = 0; i < nb_rx; i++) {
962                 ret = eth_rx_queue_start(dev, i);
963                 if (ret != 0)
964                         goto err_rx;
965         }
966
967         for (i = 0; i < nb_tx; i++) {
968                 ret = eth_tx_queue_start(dev, i);
969                 if (ret != 0)
970                         goto err_tx;
971         }
972
973         return 0;
974
975 err_tx:
976         for (i = 0; i < nb_tx; i++)
977                 eth_tx_queue_stop(dev, i);
978 err_rx:
979         for (i = 0; i < nb_rx; i++)
980                 eth_rx_queue_stop(dev, i);
981         return ret;
982 }
983
984 static void
985 eth_dev_stop(struct rte_eth_dev *dev)
986 {
987         uint16_t i;
988         uint16_t nb_rx = dev->data->nb_rx_queues;
989         uint16_t nb_tx = dev->data->nb_tx_queues;
990
991         for (i = 0; i < nb_tx; i++)
992                 eth_tx_queue_stop(dev, i);
993
994         for (i = 0; i < nb_rx; i++)
995                 eth_rx_queue_stop(dev, i);
996 }
997
998 static int
999 eth_dev_configure(struct rte_eth_dev *dev)
1000 {
1001         struct rte_eth_dev_data *data = dev->data;
1002         if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
1003                 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1004                 data->scattered_rx = 1;
1005         } else {
1006                 dev->rx_pkt_burst = eth_szedata2_rx;
1007                 data->scattered_rx = 0;
1008         }
1009         return 0;
1010 }
1011
1012 static void
1013 eth_dev_info(struct rte_eth_dev *dev,
1014                 struct rte_eth_dev_info *dev_info)
1015 {
1016         struct pmd_internals *internals = dev->data->dev_private;
1017
1018         dev_info->if_index = 0;
1019         dev_info->max_mac_addrs = 1;
1020         dev_info->max_rx_pktlen = (uint32_t)-1;
1021         dev_info->max_rx_queues = internals->max_rx_queues;
1022         dev_info->max_tx_queues = internals->max_tx_queues;
1023         dev_info->min_rx_bufsize = 0;
1024         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
1025         dev_info->tx_offload_capa = 0;
1026         dev_info->rx_queue_offload_capa = 0;
1027         dev_info->tx_queue_offload_capa = 0;
1028         dev_info->speed_capa = ETH_LINK_SPEED_100G;
1029 }
1030
1031 static int
1032 eth_stats_get(struct rte_eth_dev *dev,
1033                 struct rte_eth_stats *stats)
1034 {
1035         uint16_t i;
1036         uint16_t nb_rx = dev->data->nb_rx_queues;
1037         uint16_t nb_tx = dev->data->nb_tx_queues;
1038         uint64_t rx_total = 0;
1039         uint64_t tx_total = 0;
1040         uint64_t tx_err_total = 0;
1041         uint64_t rx_total_bytes = 0;
1042         uint64_t tx_total_bytes = 0;
1043
1044         for (i = 0; i < nb_rx; i++) {
1045                 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1046
1047                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1048                         stats->q_ipackets[i] = rxq->rx_pkts;
1049                         stats->q_ibytes[i] = rxq->rx_bytes;
1050                 }
1051                 rx_total += rxq->rx_pkts;
1052                 rx_total_bytes += rxq->rx_bytes;
1053         }
1054
1055         for (i = 0; i < nb_tx; i++) {
1056                 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1057
1058                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1059                         stats->q_opackets[i] = txq->tx_pkts;
1060                         stats->q_obytes[i] = txq->tx_bytes;
1061                         stats->q_errors[i] = txq->err_pkts;
1062                 }
1063                 tx_total += txq->tx_pkts;
1064                 tx_total_bytes += txq->tx_bytes;
1065                 tx_err_total += txq->err_pkts;
1066         }
1067
1068         stats->ipackets = rx_total;
1069         stats->opackets = tx_total;
1070         stats->ibytes = rx_total_bytes;
1071         stats->obytes = tx_total_bytes;
1072         stats->oerrors = tx_err_total;
1073         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1074
1075         return 0;
1076 }
1077
1078 static void
1079 eth_stats_reset(struct rte_eth_dev *dev)
1080 {
1081         uint16_t i;
1082         uint16_t nb_rx = dev->data->nb_rx_queues;
1083         uint16_t nb_tx = dev->data->nb_tx_queues;
1084
1085         for (i = 0; i < nb_rx; i++) {
1086                 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1087                 rxq->rx_pkts = 0;
1088                 rxq->rx_bytes = 0;
1089                 rxq->err_pkts = 0;
1090         }
1091         for (i = 0; i < nb_tx; i++) {
1092                 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1093                 txq->tx_pkts = 0;
1094                 txq->tx_bytes = 0;
1095                 txq->err_pkts = 0;
1096         }
1097 }
1098
1099 static void
1100 eth_rx_queue_release(void *q)
1101 {
1102         struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
1103
1104         if (rxq != NULL) {
1105                 if (rxq->sze != NULL)
1106                         szedata_close(rxq->sze);
1107                 rte_free(rxq);
1108         }
1109 }
1110
1111 static void
1112 eth_tx_queue_release(void *q)
1113 {
1114         struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
1115
1116         if (txq != NULL) {
1117                 if (txq->sze != NULL)
1118                         szedata_close(txq->sze);
1119                 rte_free(txq);
1120         }
1121 }
1122
1123 static void
1124 eth_dev_close(struct rte_eth_dev *dev)
1125 {
1126         uint16_t i;
1127         uint16_t nb_rx = dev->data->nb_rx_queues;
1128         uint16_t nb_tx = dev->data->nb_tx_queues;
1129
1130         eth_dev_stop(dev);
1131
1132         for (i = 0; i < nb_rx; i++) {
1133                 eth_rx_queue_release(dev->data->rx_queues[i]);
1134                 dev->data->rx_queues[i] = NULL;
1135         }
1136         dev->data->nb_rx_queues = 0;
1137         for (i = 0; i < nb_tx; i++) {
1138                 eth_tx_queue_release(dev->data->tx_queues[i]);
1139                 dev->data->tx_queues[i] = NULL;
1140         }
1141         dev->data->nb_tx_queues = 0;
1142 }
1143
1144 /**
1145  * Function takes value from first IBUF status register.
1146  * Values in IBUF and OBUF should be same.
1147  *
1148  * @param internals
1149  *     Pointer to device private structure.
1150  * @return
1151  *     Link speed constant.
1152  */
1153 static inline enum szedata2_link_speed
1154 get_link_speed(const struct pmd_internals *internals)
1155 {
1156         const volatile struct szedata2_ibuf *ibuf =
1157                 ibuf_ptr_by_index(internals->pci_rsc, 0);
1158         uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
1159         switch (speed) {
1160         case 0x03:
1161                 return SZEDATA2_LINK_SPEED_10G;
1162         case 0x04:
1163                 return SZEDATA2_LINK_SPEED_40G;
1164         case 0x05:
1165                 return SZEDATA2_LINK_SPEED_100G;
1166         default:
1167                 return SZEDATA2_LINK_SPEED_DEFAULT;
1168         }
1169 }
1170
1171 static int
1172 eth_link_update(struct rte_eth_dev *dev,
1173                 int wait_to_complete __rte_unused)
1174 {
1175         struct rte_eth_link link;
1176         struct pmd_internals *internals = (struct pmd_internals *)
1177                 dev->data->dev_private;
1178         const volatile struct szedata2_ibuf *ibuf;
1179         uint32_t i;
1180         bool link_is_up = false;
1181
1182         memset(&link, 0, sizeof(link));
1183
1184         switch (get_link_speed(internals)) {
1185         case SZEDATA2_LINK_SPEED_10G:
1186                 link.link_speed = ETH_SPEED_NUM_10G;
1187                 break;
1188         case SZEDATA2_LINK_SPEED_40G:
1189                 link.link_speed = ETH_SPEED_NUM_40G;
1190                 break;
1191         case SZEDATA2_LINK_SPEED_100G:
1192                 link.link_speed = ETH_SPEED_NUM_100G;
1193                 break;
1194         default:
1195                 link.link_speed = ETH_SPEED_NUM_10G;
1196                 break;
1197         }
1198
1199         /* szedata2 uses only full duplex */
1200         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1201
1202         for (i = 0; i < szedata2_ibuf_count; i++) {
1203                 ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
1204                 /*
1205                  * Link is considered up if at least one ibuf is enabled
1206                  * and up.
1207                  */
1208                 if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
1209                         link_is_up = true;
1210                         break;
1211                 }
1212         }
1213
1214         link.link_status = link_is_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1215
1216         link.link_autoneg = ETH_LINK_FIXED;
1217
1218         rte_eth_linkstatus_set(dev, &link);
1219         return 0;
1220 }
1221
1222 static int
1223 eth_dev_set_link_up(struct rte_eth_dev *dev)
1224 {
1225         struct pmd_internals *internals = (struct pmd_internals *)
1226                 dev->data->dev_private;
1227         uint32_t i;
1228
1229         for (i = 0; i < szedata2_ibuf_count; i++)
1230                 ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
1231         for (i = 0; i < szedata2_obuf_count; i++)
1232                 obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
1233         return 0;
1234 }
1235
1236 static int
1237 eth_dev_set_link_down(struct rte_eth_dev *dev)
1238 {
1239         struct pmd_internals *internals = (struct pmd_internals *)
1240                 dev->data->dev_private;
1241         uint32_t i;
1242
1243         for (i = 0; i < szedata2_ibuf_count; i++)
1244                 ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
1245         for (i = 0; i < szedata2_obuf_count; i++)
1246                 obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
1247         return 0;
1248 }
1249
1250 static int
1251 eth_rx_queue_setup(struct rte_eth_dev *dev,
1252                 uint16_t rx_queue_id,
1253                 uint16_t nb_rx_desc __rte_unused,
1254                 unsigned int socket_id,
1255                 const struct rte_eth_rxconf *rx_conf __rte_unused,
1256                 struct rte_mempool *mb_pool)
1257 {
1258         struct pmd_internals *internals = dev->data->dev_private;
1259         struct szedata2_rx_queue *rxq;
1260         int ret;
1261         uint32_t rx = 1 << rx_queue_id;
1262         uint32_t tx = 0;
1263
1264         if (dev->data->rx_queues[rx_queue_id] != NULL) {
1265                 eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
1266                 dev->data->rx_queues[rx_queue_id] = NULL;
1267         }
1268
1269         rxq = rte_zmalloc_socket("szedata2 rx queue",
1270                         sizeof(struct szedata2_rx_queue),
1271                         RTE_CACHE_LINE_SIZE, socket_id);
1272         if (rxq == NULL) {
1273                 PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id "
1274                                 "%" PRIu16 "!", rx_queue_id);
1275                 return -ENOMEM;
1276         }
1277
1278         rxq->priv = internals;
1279         rxq->sze = szedata_open(internals->sze_dev);
1280         if (rxq->sze == NULL) {
1281                 PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
1282                                 "%" PRIu16 "!", rx_queue_id);
1283                 eth_rx_queue_release(rxq);
1284                 return -EINVAL;
1285         }
1286         ret = szedata_subscribe3(rxq->sze, &rx, &tx);
1287         if (ret != 0 || rx == 0) {
1288                 PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
1289                                 "%" PRIu16 "!", rx_queue_id);
1290                 eth_rx_queue_release(rxq);
1291                 return -EINVAL;
1292         }
1293         rxq->rx_channel = rx_queue_id;
1294         rxq->in_port = dev->data->port_id;
1295         rxq->mb_pool = mb_pool;
1296         rxq->rx_pkts = 0;
1297         rxq->rx_bytes = 0;
1298         rxq->err_pkts = 0;
1299
1300         dev->data->rx_queues[rx_queue_id] = rxq;
1301
1302         PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
1303                         "%u.", rx_queue_id, socket_id);
1304
1305         return 0;
1306 }
1307
1308 static int
1309 eth_tx_queue_setup(struct rte_eth_dev *dev,
1310                 uint16_t tx_queue_id,
1311                 uint16_t nb_tx_desc __rte_unused,
1312                 unsigned int socket_id,
1313                 const struct rte_eth_txconf *tx_conf __rte_unused)
1314 {
1315         struct pmd_internals *internals = dev->data->dev_private;
1316         struct szedata2_tx_queue *txq;
1317         int ret;
1318         uint32_t rx = 0;
1319         uint32_t tx = 1 << tx_queue_id;
1320
1321         if (dev->data->tx_queues[tx_queue_id] != NULL) {
1322                 eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
1323                 dev->data->tx_queues[tx_queue_id] = NULL;
1324         }
1325
1326         txq = rte_zmalloc_socket("szedata2 tx queue",
1327                         sizeof(struct szedata2_tx_queue),
1328                         RTE_CACHE_LINE_SIZE, socket_id);
1329         if (txq == NULL) {
1330                 PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id "
1331                                 "%" PRIu16 "!", tx_queue_id);
1332                 return -ENOMEM;
1333         }
1334
1335         txq->priv = internals;
1336         txq->sze = szedata_open(internals->sze_dev);
1337         if (txq->sze == NULL) {
1338                 PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
1339                                 "%" PRIu16 "!", tx_queue_id);
1340                 eth_tx_queue_release(txq);
1341                 return -EINVAL;
1342         }
1343         ret = szedata_subscribe3(txq->sze, &rx, &tx);
1344         if (ret != 0 || tx == 0) {
1345                 PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
1346                                 "%" PRIu16 "!", tx_queue_id);
1347                 eth_tx_queue_release(txq);
1348                 return -EINVAL;
1349         }
1350         txq->tx_channel = tx_queue_id;
1351         txq->tx_pkts = 0;
1352         txq->tx_bytes = 0;
1353         txq->err_pkts = 0;
1354
1355         dev->data->tx_queues[tx_queue_id] = txq;
1356
1357         PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
1358                         "%u.", tx_queue_id, socket_id);
1359
1360         return 0;
1361 }
1362
/*
 * The device has no configurable MAC address; accept the request as a
 * no-op and report success so the ethdev diagnostic contract is met.
 */
static int
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
		struct ether_addr *mac_addr __rte_unused)
{
	return 0;
}
1369
1370 static void
1371 eth_promiscuous_enable(struct rte_eth_dev *dev)
1372 {
1373         struct pmd_internals *internals = (struct pmd_internals *)
1374                 dev->data->dev_private;
1375         uint32_t i;
1376
1377         for (i = 0; i < szedata2_ibuf_count; i++) {
1378                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1379                                 SZEDATA2_MAC_CHMODE_PROMISC);
1380         }
1381 }
1382
1383 static void
1384 eth_promiscuous_disable(struct rte_eth_dev *dev)
1385 {
1386         struct pmd_internals *internals = (struct pmd_internals *)
1387                 dev->data->dev_private;
1388         uint32_t i;
1389
1390         for (i = 0; i < szedata2_ibuf_count; i++) {
1391                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1392                                 SZEDATA2_MAC_CHMODE_ONLY_VALID);
1393         }
1394 }
1395
1396 static void
1397 eth_allmulticast_enable(struct rte_eth_dev *dev)
1398 {
1399         struct pmd_internals *internals = (struct pmd_internals *)
1400                 dev->data->dev_private;
1401         uint32_t i;
1402
1403         for (i = 0; i < szedata2_ibuf_count; i++) {
1404                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1405                                 SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
1406         }
1407 }
1408
1409 static void
1410 eth_allmulticast_disable(struct rte_eth_dev *dev)
1411 {
1412         struct pmd_internals *internals = (struct pmd_internals *)
1413                 dev->data->dev_private;
1414         uint32_t i;
1415
1416         for (i = 0; i < szedata2_ibuf_count; i++) {
1417                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1418                                 SZEDATA2_MAC_CHMODE_ONLY_VALID);
1419         }
1420 }
1421
/* szedata2 PMD callbacks registered with the ethdev layer. */
static const struct eth_dev_ops ops = {
	.dev_start          = eth_dev_start,
	.dev_stop           = eth_dev_stop,
	.dev_set_link_up    = eth_dev_set_link_up,
	.dev_set_link_down  = eth_dev_set_link_down,
	.dev_close          = eth_dev_close,
	.dev_configure      = eth_dev_configure,
	.dev_infos_get      = eth_dev_info,
	.promiscuous_enable   = eth_promiscuous_enable,
	.promiscuous_disable  = eth_promiscuous_disable,
	.allmulticast_enable  = eth_allmulticast_enable,
	.allmulticast_disable = eth_allmulticast_disable,
	.rx_queue_start     = eth_rx_queue_start,
	.rx_queue_stop      = eth_rx_queue_stop,
	.tx_queue_start     = eth_tx_queue_start,
	.tx_queue_stop      = eth_tx_queue_stop,
	.rx_queue_setup     = eth_rx_queue_setup,
	.tx_queue_setup     = eth_tx_queue_setup,
	.rx_queue_release   = eth_rx_queue_release,
	.tx_queue_release   = eth_tx_queue_release,
	.link_update        = eth_link_update,
	.stats_get          = eth_stats_get,
	.stats_reset        = eth_stats_reset,
	.mac_addr_set       = eth_mac_addr_set,
};
1447
1448 /*
1449  * This function goes through sysfs and looks for an index of szedata2
1450  * device file (/dev/szedataIIX, where X is the index).
1451  *
1452  * @return
1453  *           0 on success
1454  *          -1 on error
1455  */
1456 static int
1457 get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
1458 {
1459         DIR *dir;
1460         struct dirent *entry;
1461         int ret;
1462         uint32_t tmp_index;
1463         FILE *fd;
1464         char pcislot_path[PATH_MAX];
1465         uint32_t domain;
1466         uint8_t bus;
1467         uint8_t devid;
1468         uint8_t function;
1469
1470         dir = opendir("/sys/class/combo");
1471         if (dir == NULL)
1472                 return -1;
1473
1474         /*
1475          * Iterate through all combosixX directories.
1476          * When the value in /sys/class/combo/combosixX/device/pcislot
1477          * file is the location of the ethernet device dev, "X" is the
1478          * index of the device.
1479          */
1480         while ((entry = readdir(dir)) != NULL) {
1481                 ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
1482                 if (ret != 1)
1483                         continue;
1484
1485                 snprintf(pcislot_path, PATH_MAX,
1486                         "/sys/class/combo/combosix%u/device/pcislot",
1487                         tmp_index);
1488
1489                 fd = fopen(pcislot_path, "r");
1490                 if (fd == NULL)
1491                         continue;
1492
1493                 ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
1494                                 &domain, &bus, &devid, &function);
1495                 fclose(fd);
1496                 if (ret != 4)
1497                         continue;
1498
1499                 if (pcislot_addr->domain == domain &&
1500                                 pcislot_addr->bus == bus &&
1501                                 pcislot_addr->devid == devid &&
1502                                 pcislot_addr->function == function) {
1503                         *index = tmp_index;
1504                         closedir(dir);
1505                         return 0;
1506                 }
1507         }
1508
1509         closedir(dir);
1510         return -1;
1511 }
1512
1513 static int
1514 rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
1515 {
1516         struct rte_eth_dev_data *data = dev->data;
1517         struct pmd_internals *internals = (struct pmd_internals *)
1518                 data->dev_private;
1519         struct szedata *szedata_temp;
1520         int ret;
1521         uint32_t szedata2_index;
1522         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1523         struct rte_pci_addr *pci_addr = &pci_dev->addr;
1524         struct rte_mem_resource *pci_rsc =
1525                 &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
1526         char rsc_filename[PATH_MAX];
1527         void *pci_resource_ptr = NULL;
1528         int fd;
1529
1530         PMD_INIT_LOG(INFO, "Initializing szedata2 device (" PCI_PRI_FMT ")",
1531                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1532                         pci_addr->function);
1533
1534         internals->dev = dev;
1535
1536         /* Get index of szedata2 device file and create path to device file */
1537         ret = get_szedata2_index(pci_addr, &szedata2_index);
1538         if (ret != 0) {
1539                 PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
1540                 return -ENODEV;
1541         }
1542         snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
1543                         szedata2_index);
1544
1545         PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev);
1546
1547         /*
1548          * Get number of available DMA RX and TX channels, which is maximum
1549          * number of queues that can be created and store it in private device
1550          * data structure.
1551          */
1552         szedata_temp = szedata_open(internals->sze_dev);
1553         if (szedata_temp == NULL) {
1554                 PMD_INIT_LOG(ERR, "szedata_open(): failed to open %s",
1555                                 internals->sze_dev);
1556                 return -EINVAL;
1557         }
1558         internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
1559                         SZE2_DIR_RX);
1560         internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
1561                         SZE2_DIR_TX);
1562         szedata_close(szedata_temp);
1563
1564         PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u",
1565                         internals->max_rx_queues, internals->max_tx_queues);
1566
1567         /* Set rx, tx burst functions */
1568         if (data->scattered_rx == 1)
1569                 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1570         else
1571                 dev->rx_pkt_burst = eth_szedata2_rx;
1572         dev->tx_pkt_burst = eth_szedata2_tx;
1573
1574         /* Set function callbacks for Ethernet API */
1575         dev->dev_ops = &ops;
1576
1577         rte_eth_copy_pci_info(dev, pci_dev);
1578
1579         /* mmap pci resource0 file to rte_mem_resource structure */
1580         if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
1581                         0) {
1582                 PMD_INIT_LOG(ERR, "Missing resource%u file",
1583                                 PCI_RESOURCE_NUMBER);
1584                 return -EINVAL;
1585         }
1586         snprintf(rsc_filename, PATH_MAX,
1587                 "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
1588                 pci_addr->domain, pci_addr->bus,
1589                 pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
1590         fd = open(rsc_filename, O_RDWR);
1591         if (fd < 0) {
1592                 PMD_INIT_LOG(ERR, "Could not open file %s", rsc_filename);
1593                 return -EINVAL;
1594         }
1595
1596         pci_resource_ptr = mmap(0,
1597                         pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
1598                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1599         close(fd);
1600         if (pci_resource_ptr == MAP_FAILED) {
1601                 PMD_INIT_LOG(ERR, "Could not mmap file %s (fd = %d)",
1602                                 rsc_filename, fd);
1603                 return -EINVAL;
1604         }
1605         pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
1606         internals->pci_rsc = pci_rsc;
1607
1608         PMD_INIT_LOG(DEBUG, "resource%u phys_addr = 0x%llx len = %llu "
1609                         "virt addr = %llx", PCI_RESOURCE_NUMBER,
1610                         (unsigned long long)pci_rsc->phys_addr,
1611                         (unsigned long long)pci_rsc->len,
1612                         (unsigned long long)pci_rsc->addr);
1613
1614         /* Get link state */
1615         eth_link_update(dev, 0);
1616
1617         /* Allocate space for one mac address */
1618         data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
1619                         RTE_CACHE_LINE_SIZE);
1620         if (data->mac_addrs == NULL) {
1621                 PMD_INIT_LOG(ERR, "Could not alloc space for MAC address!");
1622                 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1623                        pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1624                 return -EINVAL;
1625         }
1626
1627         ether_addr_copy(&eth_addr, data->mac_addrs);
1628
1629         /* At initial state COMBO card is in promiscuous mode so disable it */
1630         eth_promiscuous_disable(dev);
1631
1632         PMD_INIT_LOG(INFO, "szedata2 device ("
1633                         PCI_PRI_FMT ") successfully initialized",
1634                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1635                         pci_addr->function);
1636
1637         return 0;
1638 }
1639
1640 static int
1641 rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
1642 {
1643         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1644         struct rte_pci_addr *pci_addr = &pci_dev->addr;
1645
1646         rte_free(dev->data->mac_addrs);
1647         dev->data->mac_addrs = NULL;
1648         munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1649                pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1650
1651         PMD_DRV_LOG(INFO, "szedata2 device ("
1652                         PCI_PRI_FMT ") successfully uninitialized",
1653                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1654                         pci_addr->function);
1655
1656         return 0;
1657 }
1658
/* PCI IDs of the Netcope COMBO cards handled by this PMD. */
static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
	{
		/* COMBO-80G card */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO80G)
	},
	{
		/* COMBO-100G card */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO100G)
	},
	{
		/* COMBO-100G2 card */
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO100G2)
	},
	{
		/* Sentinel entry terminating the table. */
		.vendor_id = 0,
	}
};
1676
1677 static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1678         struct rte_pci_device *pci_dev)
1679 {
1680         return rte_eth_dev_pci_generic_probe(pci_dev,
1681                 sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
1682 }
1683
1684 static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
1685 {
1686         return rte_eth_dev_pci_generic_remove(pci_dev,
1687                 rte_szedata2_eth_dev_uninit);
1688 }
1689
/* PCI driver structure handed to the ethdev PCI helper layer. */
static struct rte_pci_driver szedata2_eth_driver = {
	.id_table = rte_szedata2_pci_id_table,
	.probe = szedata2_eth_pci_probe,
	.remove = szedata2_eth_pci_remove,
};

/* Register the driver, its PCI ID table and required kernel modules. */
RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
	"* combo6core & combov3 & szedata2 & szedata2_cv3");
1700
1701 RTE_INIT(szedata2_init_log);
1702 static void
1703 szedata2_init_log(void)
1704 {
1705         szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
1706         if (szedata2_logtype_init >= 0)
1707                 rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
1708         szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
1709         if (szedata2_logtype_driver >= 0)
1710                 rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);
1711 }