04dc8bf31e306906115d09643e2f00a4930ddf13
[dpdk.git] / drivers / net / szedata2 / rte_eth_szedata2.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) 2015 - 2016 CESNET
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of CESNET nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdint.h>
35 #include <unistd.h>
36 #include <stdbool.h>
37 #include <err.h>
38 #include <sys/types.h>
39 #include <dirent.h>
40 #include <sys/stat.h>
41 #include <fcntl.h>
42 #include <sys/mman.h>
43
44 #include <libsze2.h>
45
46 #include <rte_mbuf.h>
47 #include <rte_ethdev_driver.h>
48 #include <rte_ethdev_pci.h>
49 #include <rte_malloc.h>
50 #include <rte_memcpy.h>
51 #include <rte_kvargs.h>
52 #include <rte_dev.h>
53
54 #include "rte_eth_szedata2.h"
55 #include "szedata2_iobuf.h"
56
/* Upper bounds on RX/TX DMA channels exposed as ethdev queues. */
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
/*
 * Amount of TX buffer space (32 MiB) requested per
 * szedata_tx_lock_data() call in the TX burst path.
 */
#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)

/**
 * size of szedata2_packet header with alignment
 */
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8

/* PMD name used for driver registration. */
#define RTE_SZEDATA2_DRIVER_NAME net_szedata2

/* printf-style format for the szedata2 character device path. */
#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
69
/* Per-device private data shared by all queues of one szedata2 port. */
struct pmd_internals {
	/* Back-pointer to the ethdev; RX paths use it to bump
	 * dev->data->rx_mbuf_alloc_failed.
	 */
	struct rte_eth_dev *dev;
	uint16_t max_rx_queues;		/* number of usable RX DMA channels */
	uint16_t max_tx_queues;		/* number of usable TX DMA channels */
	/* Device node path — presumably built from SZEDATA2_DEV_PATH_FMT;
	 * formatting code is outside this chunk.
	 */
	char sze_dev[PATH_MAX];
	/* NOTE(review): mapped PCI resource, apparently for IO/buf register
	 * access — confirm against szedata2_iobuf.h users.
	 */
	struct rte_mem_resource *pci_rsc;
};
77
/* State of one RX queue: a szedata2 RX channel plus SW statistics. */
struct szedata2_rx_queue {
	struct pmd_internals *priv;	/* owning device private data */
	/* libsze2 session handle; RX bursts return 0 while it is NULL */
	struct szedata *sze;
	uint8_t rx_channel;		/* szedata2 RX DMA channel index */
	uint16_t in_port;		/* port id stamped into mbuf->port */
	struct rte_mempool *mb_pool;	/* mempool backing RX mbuf allocation */
	volatile uint64_t rx_pkts;	/* successfully delivered packets */
	volatile uint64_t rx_bytes;	/* successfully delivered bytes */
	/* Error counter — not updated anywhere in the visible RX code */
	volatile uint64_t err_pkts;
};
88
/* State of one TX queue: a szedata2 TX channel plus SW statistics. */
struct szedata2_tx_queue {
	struct pmd_internals *priv;	/* owning device private data */
	/* libsze2 session handle; TX bursts return 0 while it is NULL */
	struct szedata *sze;
	uint8_t tx_channel;		/* szedata2 TX DMA channel index */
	volatile uint64_t tx_pkts;	/* transmitted packets */
	volatile uint64_t tx_bytes;	/* transmitted bytes */
	volatile uint64_t err_pkts;	/* packets that failed to transmit */
};
97
/*
 * Fixed MAC address for szedata2 ports.  NOTE(review): appears to be a
 * single hard-coded address for every port (no per-device MAC read in
 * this chunk) — confirm against the device init code.
 */
static struct ether_addr eth_addr = {
	.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};
101
/**
 * Non-scattered RX burst function.
 *
 * Reads up to @p nb_pkts packets from the szedata2 channel bound to
 * @p queue and copies each packet's payload into one freshly allocated
 * mbuf.  A packet that does not fit into a single mbuf's data room is
 * dropped (the scattered variant below handles chaining).
 *
 * Each szedata segment starts with an 8-byte header: two little-endian
 * 16-bit fields holding the whole segment size and the HW metadata
 * size.  The segment may be split across two libsze2 lock regions, in
 * which case the header and/or payload is reassembled from both parts.
 *
 * @param queue   struct szedata2_rx_queue pointer.
 * @param bufs    Output array for received mbufs.
 * @param nb_pkts Maximum number of packets to receive.
 * @return Number of mbufs stored into @p bufs.
 */
static uint16_t
eth_szedata2_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct szedata2_rx_queue *sze_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint16_t sg_size;
	uint16_t hw_size;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	struct szedata *sze = sze_q->sze;
	uint8_t *header_ptr = NULL; /* header of packet */
	uint8_t *packet_ptr1 = NULL;
	uint8_t *packet_ptr2 = NULL;
	uint16_t packet_len1 = 0;
	uint16_t packet_len2 = 0;
	uint16_t hw_data_align;

	if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from szedata2 channel given
	 * by queue and copies the packet data into a newly allocated mbuf
	 * to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);

		if (unlikely(mbuf == NULL)) {
			sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		/* get the next sze packet */
		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
				sze->ct_rx_lck->next == NULL) {
			/* unlock old data */
			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
			sze->ct_rx_lck_orig = NULL;
			sze->ct_rx_lck = NULL;
		}

		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
			/* nothing to read, lock new data */
			sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
			sze->ct_rx_lck_orig = sze->ct_rx_lck;

			if (sze->ct_rx_lck == NULL) {
				/* nothing to lock */
				rte_pktmbuf_free(mbuf);
				break;
			}

			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;

			if (!sze->ct_rx_rem_bytes) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
			/*
			 * cut in header
			 * copy parts of header to merge buffer
			 */
			if (sze->ct_rx_lck->next == NULL) {
				rte_pktmbuf_free(mbuf);
				break;
			}

			/* copy first part of header */
			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
					sze->ct_rx_rem_bytes);

			/* copy second part of header */
			sze->ct_rx_lck = sze->ct_rx_lck->next;
			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
				sze->ct_rx_cur_ptr,
				RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes);

			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
				RTE_SZE2_PACKET_HEADER_SIZE +
				sze->ct_rx_rem_bytes;

			header_ptr = (uint8_t *)sze->ct_rx_buffer;
		} else {
			/* not cut */
			header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
		}

		/*
		 * Header layout: [0..1] = total segment size,
		 * [2..3] = HW metadata size, both little endian.
		 * Payload size is what remains after the 8B-aligned
		 * header + HW metadata.
		 */
		sg_size = le16toh(*((uint16_t *)header_ptr));
		hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
		packet_size = sg_size -
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);


		/* checks if packet all right */
		/*
		 * NOTE(review): errx() terminates the whole process on
		 * malformed data from the device.  A PMD should arguably
		 * drop the packet and count an error instead — flagged,
		 * not changed, to keep behavior identical.
		 */
		if (!sg_size)
			errx(5, "Zero segsize");

		/* check sg_size and hwsize */
		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
			errx(10, "Hwsize bigger than expected. Segsize: %d, "
				"hwsize: %d", sg_size, hw_size);
		}

		/* bytes to skip between header end and payload start */
		hw_data_align =
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
			RTE_SZE2_PACKET_HEADER_SIZE;

		if (sze->ct_rx_rem_bytes >=
				(uint16_t)(sg_size -
				RTE_SZE2_PACKET_HEADER_SIZE)) {
			/* no cut */
			/* one packet ready - go to another */
			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
			packet_len1 = packet_size;
			packet_ptr2 = NULL;
			packet_len2 = 0;

			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
		} else {
			/* cut in data */
			if (sze->ct_rx_lck->next == NULL) {
				errx(6, "Need \"next\" lock, "
					"but it is missing: %u",
					sze->ct_rx_rem_bytes);
			}

			/* skip hw data */
			if (sze->ct_rx_rem_bytes <= hw_data_align) {
				/* HW metadata itself spans the lock
				 * boundary; the whole payload lies in
				 * the next lock.
				 */
				uint16_t rem_size = hw_data_align -
					sze->ct_rx_rem_bytes;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr =
					(void *)(((uint8_t *)
					(sze->ct_rx_lck->start)) + rem_size);

				packet_ptr1 = sze->ct_rx_cur_ptr;
				packet_len1 = packet_size;
				packet_ptr2 = NULL;
				packet_len2 = 0;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size);
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					rem_size - RTE_SZE2_ALIGN8(packet_size);
			} else {
				/* get pointer and length from first part */
				packet_ptr1 = sze->ct_rx_cur_ptr +
					hw_data_align;
				packet_len1 = sze->ct_rx_rem_bytes -
					hw_data_align;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;

				/* get pointer and length from second part */
				packet_ptr2 = sze->ct_rx_cur_ptr;
				packet_len2 = packet_size - packet_len1;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size) -
					packet_len1;
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					(RTE_SZE2_ALIGN8(packet_size) -
					 packet_len1);
			}
		}

		if (unlikely(packet_ptr1 == NULL)) {
			rte_pktmbuf_free(mbuf);
			break;
		}

		/* get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (packet_size <= buf_size) {
			/* sze packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
					packet_ptr1, packet_len1);
			if (packet_ptr2 != NULL) {
				rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
					uint8_t *) + packet_len1),
					packet_ptr2, packet_len2);
			}
			mbuf->data_len = (uint16_t)packet_size;

			mbuf->pkt_len = packet_size;
			mbuf->port = sze_q->in_port;
			bufs[num_rx] = mbuf;
			num_rx++;
			num_bytes += packet_size;
		} else {
			/*
			 * sze packet will not fit in one mbuf,
			 * scattered mode is not enabled, drop packet
			 */
			RTE_LOG(ERR, PMD,
				"SZE segment %d bytes will not fit in one mbuf "
				"(%d bytes), scattered mode is not enabled, "
				"drop packet!!\n",
				packet_size, buf_size);
			rte_pktmbuf_free(mbuf);
		}
	}

	sze_q->rx_pkts += num_rx;
	sze_q->rx_bytes += num_bytes;
	return num_rx;
}
336
/**
 * Scattered RX burst function.
 *
 * Same szedata2 segment parsing as eth_szedata2_rx(), but a packet
 * larger than one mbuf's data room is spread across a chain of mbufs
 * instead of being dropped.
 *
 * Because the mbuf is allocated only after the sze cursor state has
 * already been advanced, the relevant sze fields (lock pointer,
 * remaining bytes, cursor) are backed up at the top of each iteration
 * and restored on any allocation failure, so the packet can be
 * retried on the next burst.
 *
 * @param queue   struct szedata2_rx_queue pointer.
 * @param bufs    Output array for received mbufs (possibly chained).
 * @param nb_pkts Maximum number of packets to receive.
 * @return Number of packets stored into @p bufs.
 */
static uint16_t
eth_szedata2_rx_scattered(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct szedata2_rx_queue *sze_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint16_t sg_size;
	uint16_t hw_size;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	struct szedata *sze = sze_q->sze;
	uint8_t *header_ptr = NULL; /* header of packet */
	uint8_t *packet_ptr1 = NULL;
	uint8_t *packet_ptr2 = NULL;
	uint16_t packet_len1 = 0;
	uint16_t packet_len2 = 0;
	uint16_t hw_data_align;
	uint64_t *mbuf_failed_ptr =
		&sze_q->priv->dev->data->rx_mbuf_alloc_failed;

	if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from szedata2 channel given
	 * by queue and copies the packet data into a newly allocated mbuf
	 * to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		const struct szedata_lock *ct_rx_lck_backup;
		unsigned int ct_rx_rem_bytes_backup;
		unsigned char *ct_rx_cur_ptr_backup;

		/* get the next sze packet */
		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
				sze->ct_rx_lck->next == NULL) {
			/* unlock old data */
			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
			sze->ct_rx_lck_orig = NULL;
			sze->ct_rx_lck = NULL;
		}

		/*
		 * Store items from sze structure which can be changed
		 * before mbuf allocating. Use these items in case of mbuf
		 * allocating failure.
		 */
		ct_rx_lck_backup = sze->ct_rx_lck;
		ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
		ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;

		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
			/* nothing to read, lock new data */
			sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
			sze->ct_rx_lck_orig = sze->ct_rx_lck;

			/*
			 * Backup items from sze structure must be updated
			 * after locking to contain pointers to new locks.
			 */
			ct_rx_lck_backup = sze->ct_rx_lck;
			ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
			ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;

			if (sze->ct_rx_lck == NULL)
				/* nothing to lock */
				break;

			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;

			if (!sze->ct_rx_rem_bytes)
				break;
		}

		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
			/*
			 * cut in header - copy parts of header to merge buffer
			 */
			if (sze->ct_rx_lck->next == NULL)
				break;

			/* copy first part of header */
			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
					sze->ct_rx_rem_bytes);

			/* copy second part of header */
			sze->ct_rx_lck = sze->ct_rx_lck->next;
			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
				sze->ct_rx_cur_ptr,
				RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes);

			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
				RTE_SZE2_PACKET_HEADER_SIZE +
				sze->ct_rx_rem_bytes;

			header_ptr = (uint8_t *)sze->ct_rx_buffer;
		} else {
			/* not cut */
			header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
		}

		/*
		 * Header layout: [0..1] = total segment size,
		 * [2..3] = HW metadata size, both little endian.
		 */
		sg_size = le16toh(*((uint16_t *)header_ptr));
		hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
		packet_size = sg_size -
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);


		/* checks if packet all right */
		/*
		 * NOTE(review): errx() terminates the whole process on
		 * malformed data from the device — same concern as in
		 * eth_szedata2_rx(); flagged but left unchanged.
		 */
		if (!sg_size)
			errx(5, "Zero segsize");

		/* check sg_size and hwsize */
		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
			errx(10, "Hwsize bigger than expected. Segsize: %d, "
					"hwsize: %d", sg_size, hw_size);
		}

		/* bytes to skip between header end and payload start */
		hw_data_align =
			RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
			hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;

		if (sze->ct_rx_rem_bytes >=
				(uint16_t)(sg_size -
				RTE_SZE2_PACKET_HEADER_SIZE)) {
			/* no cut */
			/* one packet ready - go to another */
			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
			packet_len1 = packet_size;
			packet_ptr2 = NULL;
			packet_len2 = 0;

			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
		} else {
			/* cut in data */
			if (sze->ct_rx_lck->next == NULL) {
				errx(6, "Need \"next\" lock, but it is "
					"missing: %u", sze->ct_rx_rem_bytes);
			}

			/* skip hw data */
			if (sze->ct_rx_rem_bytes <= hw_data_align) {
				/* HW metadata spans the lock boundary;
				 * the whole payload is in the next lock.
				 */
				uint16_t rem_size = hw_data_align -
					sze->ct_rx_rem_bytes;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr =
					(void *)(((uint8_t *)
					(sze->ct_rx_lck->start)) + rem_size);

				packet_ptr1 = sze->ct_rx_cur_ptr;
				packet_len1 = packet_size;
				packet_ptr2 = NULL;
				packet_len2 = 0;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size);
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					rem_size - RTE_SZE2_ALIGN8(packet_size);
			} else {
				/* get pointer and length from first part */
				packet_ptr1 = sze->ct_rx_cur_ptr +
					hw_data_align;
				packet_len1 = sze->ct_rx_rem_bytes -
					hw_data_align;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;

				/* get pointer and length from second part */
				packet_ptr2 = sze->ct_rx_cur_ptr;
				packet_len2 = packet_size - packet_len1;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size) -
					packet_len1;
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					(RTE_SZE2_ALIGN8(packet_size) -
					 packet_len1);
			}
		}

		if (unlikely(packet_ptr1 == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);

		if (unlikely(mbuf == NULL)) {
			/*
			 * Restore items from sze structure to state after
			 * unlocking (eventually locking).
			 */
			sze->ct_rx_lck = ct_rx_lck_backup;
			sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
			sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
			sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
			break;
		}

		/* get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (packet_size <= buf_size) {
			/* sze packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
					packet_ptr1, packet_len1);
			if (packet_ptr2 != NULL) {
				rte_memcpy((void *)
					(rte_pktmbuf_mtod(mbuf, uint8_t *) +
					packet_len1), packet_ptr2, packet_len2);
			}
			mbuf->data_len = (uint16_t)packet_size;
		} else {
			/*
			 * sze packet will not fit in one mbuf,
			 * scatter packet into more mbufs
			 */
			struct rte_mbuf *m = mbuf;
			uint16_t len = rte_pktmbuf_tailroom(mbuf);

			/* copy first part of packet */
			/* fill first mbuf */
			/*
			 * NOTE(review): len is the full tailroom here; when
			 * the segment is cut, packet_len1 can be smaller
			 * than len, so "packet_len1 -= len" would wrap the
			 * uint16 and the memcpy would overread packet_ptr1.
			 * TODO: confirm against possible cut positions.
			 */
			rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
				len);
			packet_len1 -= len;
			packet_ptr1 = ((uint8_t *)packet_ptr1) + len;

			while (packet_len1 > 0) {
				/* fill new mbufs */
				m->next = rte_pktmbuf_alloc(sze_q->mb_pool);

				if (unlikely(m->next == NULL)) {
					rte_pktmbuf_free(mbuf);
					/*
					 * Restore items from sze structure
					 * to state after unlocking (eventually
					 * locking).
					 */
					sze->ct_rx_lck = ct_rx_lck_backup;
					sze->ct_rx_rem_bytes =
						ct_rx_rem_bytes_backup;
					sze->ct_rx_cur_ptr =
						ct_rx_cur_ptr_backup;
					(*mbuf_failed_ptr)++;
					goto finish;
				}

				m = m->next;

				len = RTE_MIN(rte_pktmbuf_tailroom(m),
					packet_len1);
				rte_memcpy(rte_pktmbuf_append(mbuf, len),
					packet_ptr1, len);

				/* append() does not chain segments; count
				 * the manually linked one.
				 */
				(mbuf->nb_segs)++;
				packet_len1 -= len;
				packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
			}

			if (packet_ptr2 != NULL) {
				/* copy second part of packet, if exists */
				/* fill the rest of currently last mbuf */
				/*
				 * NOTE(review): unlike the loop below, this
				 * len is not clamped by packet_len2 — same
				 * potential uint16 wrap / overread as above.
				 */
				len = rte_pktmbuf_tailroom(m);
				rte_memcpy(rte_pktmbuf_append(mbuf, len),
					packet_ptr2, len);
				packet_len2 -= len;
				packet_ptr2 = ((uint8_t *)packet_ptr2) + len;

				while (packet_len2 > 0) {
					/* fill new mbufs */
					m->next = rte_pktmbuf_alloc(
							sze_q->mb_pool);

					if (unlikely(m->next == NULL)) {
						rte_pktmbuf_free(mbuf);
						/*
						 * Restore items from sze
						 * structure to state after
						 * unlocking (eventually
						 * locking).
						 */
						sze->ct_rx_lck =
							ct_rx_lck_backup;
						sze->ct_rx_rem_bytes =
							ct_rx_rem_bytes_backup;
						sze->ct_rx_cur_ptr =
							ct_rx_cur_ptr_backup;
						(*mbuf_failed_ptr)++;
						goto finish;
					}

					m = m->next;

					len = RTE_MIN(rte_pktmbuf_tailroom(m),
						packet_len2);
					rte_memcpy(
						rte_pktmbuf_append(mbuf, len),
						packet_ptr2, len);

					(mbuf->nb_segs)++;
					packet_len2 -= len;
					packet_ptr2 = ((uint8_t *)packet_ptr2) +
						len;
				}
			}
		}
		mbuf->pkt_len = packet_size;
		mbuf->port = sze_q->in_port;
		bufs[num_rx] = mbuf;
		num_rx++;
		num_bytes += packet_size;
	}

finish:
	sze_q->rx_pkts += num_rx;
	sze_q->rx_bytes += num_bytes;
	return num_rx;
}
673
/**
 * TX burst callback: copy packets from mbufs into the szedata2 TX
 * ring-buffer channel.
 *
 * Each packet is written as an 8-byte-aligned hardware frame: an
 * 8 B header (first 2 B = little-endian total frame length, next
 * 2 B = 0) followed by the raw packet data padded to 8 B.
 *
 * @param queue
 *     TX queue (struct szedata2_tx_queue *).
 * @param bufs
 *     Array of mbufs to transmit; successfully written mbufs are freed.
 * @param nb_pkts
 *     Number of mbufs in bufs.
 * @return
 *     Number of packets written to the ring.
 */
static uint16_t
eth_szedata2_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	struct szedata2_tx_queue *sze_q = queue;
	uint16_t num_tx = 0;
	uint64_t num_bytes = 0;

	const struct szedata_lock *lck;    /* locked ring region (1 or 2 chunks) */
	uint32_t lock_size;                /* bytes left in first chunk */
	uint32_t lock_size2;               /* bytes in wraparound chunk, if any */
	void *dst;                         /* current write position in ring */
	uint32_t pkt_len;
	uint32_t hwpkt_len;                /* header + 8B-aligned payload */
	uint32_t unlock_size;              /* bytes written, to release to HW */
	uint32_t rem_len;                  /* part of frame landing in 2nd chunk */
	uint16_t mbuf_segs;
	uint16_t pkt_left = nb_pkts;

	if (sze_q->sze == NULL || nb_pkts == 0)
		return 0;

	while (pkt_left > 0) {
		unlock_size = 0;
		/* Busy-retry until the library grants a lock on ring space. */
		lck = szedata_tx_lock_data(sze_q->sze,
			RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
			sze_q->tx_channel);
		if (lck == NULL)
			continue;

		dst = lck->start;
		lock_size = lck->len;
		lock_size2 = lck->next ? lck->next->len : 0;

next_packet:
		mbuf = bufs[nb_pkts - pkt_left];

		pkt_len = mbuf->pkt_len;
		mbuf_segs = mbuf->nb_segs;

		hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
			RTE_SZE2_ALIGN8(pkt_len);

		/*
		 * Not enough locked space for this frame: flush what was
		 * already written and retry with a fresh lock.
		 */
		if (lock_size + lock_size2 < hwpkt_len) {
			szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
			continue;
		}

		num_bytes += pkt_len;

		if (lock_size > hwpkt_len) {
			/* Whole frame fits in the first chunk — no wrap. */
			void *tmp_dst;

			rem_len = 0;

			/* write packet length at first 2 bytes in 8B header */
			*((uint16_t *)dst) = htole16(
					RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
					pkt_len);
			*(((uint16_t *)dst) + 1) = htole16(0);

			/* copy packet from mbuf */
			tmp_dst = ((uint8_t *)(dst)) +
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
			if (mbuf_segs == 1) {
				/*
				 * non-scattered packet,
				 * transmit from one mbuf
				 */
				rte_memcpy(tmp_dst,
					rte_pktmbuf_mtod(mbuf, const void *),
					pkt_len);
			} else {
				/* scattered packet, transmit from more mbufs */
				struct rte_mbuf *m = mbuf;
				while (m) {
					rte_memcpy(tmp_dst,
						rte_pktmbuf_mtod(m,
						const void *),
						m->data_len);
					tmp_dst = ((uint8_t *)(tmp_dst)) +
						m->data_len;
					m = m->next;
				}
			}


			dst = ((uint8_t *)dst) + hwpkt_len;
			unlock_size += hwpkt_len;
			lock_size -= hwpkt_len;

			rte_pktmbuf_free(mbuf);
			num_tx++;
			pkt_left--;
			if (pkt_left == 0) {
				szedata_tx_unlock_data(sze_q->sze, lck,
					unlock_size);
				break;
			}
			/* Try to pack the next packet into the same lock. */
			goto next_packet;
		} else if (lock_size + lock_size2 >= hwpkt_len) {
			/*
			 * Frame spans both chunks (ring wraparound).
			 * NOTE(review): this condition is always true here —
			 * the "< hwpkt_len" case already continued above —
			 * so this branch is effectively a plain "else".
			 */
			void *tmp_dst;
			uint16_t write_len;

			/* write packet length at first 2 bytes in 8B header */
			*((uint16_t *)dst) =
				htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
					pkt_len);
			*(((uint16_t *)dst) + 1) = htole16(0);

			/*
			 * If the raw packet (pkt_len) is smaller than lock_size
			 * get the correct length for memcpy
			 */
			write_len =
				pkt_len < lock_size -
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
				pkt_len :
				lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;

			rem_len = hwpkt_len - lock_size;

			tmp_dst = ((uint8_t *)(dst)) +
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
			if (mbuf_segs == 1) {
				/*
				 * non-scattered packet,
				 * transmit from one mbuf
				 */
				/* copy part of packet to first area */
				rte_memcpy(tmp_dst,
					rte_pktmbuf_mtod(mbuf, const void *),
					write_len);

				if (lck->next)
					dst = lck->next->start;

				/* copy part of packet to second area */
				rte_memcpy(dst,
					(const void *)(rte_pktmbuf_mtod(mbuf,
							const uint8_t *) +
					write_len), pkt_len - write_len);
			} else {
				/* scattered packet, transmit from more mbufs */
				struct rte_mbuf *m = mbuf;
				uint16_t written = 0;
				uint16_t to_write = 0;
				bool new_mbuf = true;
				uint16_t write_off = 0;

				/* copy part of packet to first area */
				while (m && written < write_len) {
					to_write = RTE_MIN(m->data_len,
							write_len - written);
					rte_memcpy(tmp_dst,
						rte_pktmbuf_mtod(m,
							const void *),
						to_write);

					tmp_dst = ((uint8_t *)(tmp_dst)) +
						to_write;
					/*
					 * Advance to the next mbuf only if the
					 * current one was consumed completely;
					 * otherwise resume it in the 2nd area.
					 */
					if (m->data_len <= write_len -
							written) {
						m = m->next;
						new_mbuf = true;
					} else {
						new_mbuf = false;
					}
					written += to_write;
				}

				if (lck->next)
					dst = lck->next->start;

				tmp_dst = dst;
				written = 0;
				/* Skip the part of m already copied above. */
				write_off = new_mbuf ? 0 : to_write;

				/* copy part of packet to second area */
				while (m && written < pkt_len - write_len) {
					rte_memcpy(tmp_dst, (const void *)
						(rte_pktmbuf_mtod(m,
						uint8_t *) + write_off),
						m->data_len - write_off);

					tmp_dst = ((uint8_t *)(tmp_dst)) +
						(m->data_len - write_off);
					written += m->data_len - write_off;
					m = m->next;
					write_off = 0;
				}
			}

			dst = ((uint8_t *)dst) + rem_len;
			unlock_size += hwpkt_len;
			/* Remaining usable space is the tail of chunk 2. */
			lock_size = lock_size2 - rem_len;
			lock_size2 = 0;

			rte_pktmbuf_free(mbuf);
			num_tx++;
		}

		szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
		pkt_left--;
	}

	sze_q->tx_pkts += num_tx;
	sze_q->err_pkts += nb_pkts - num_tx;
	sze_q->tx_bytes += num_bytes;
	return num_tx;
}
887
888 static int
889 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
890 {
891         struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
892         int ret;
893         struct pmd_internals *internals = (struct pmd_internals *)
894                 dev->data->dev_private;
895
896         if (rxq->sze == NULL) {
897                 uint32_t rx = 1 << rxq->rx_channel;
898                 uint32_t tx = 0;
899                 rxq->sze = szedata_open(internals->sze_dev);
900                 if (rxq->sze == NULL)
901                         return -EINVAL;
902                 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
903                 if (ret != 0 || rx == 0)
904                         goto err;
905         }
906
907         ret = szedata_start(rxq->sze);
908         if (ret != 0)
909                 goto err;
910         dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
911         return 0;
912
913 err:
914         szedata_close(rxq->sze);
915         rxq->sze = NULL;
916         return -EINVAL;
917 }
918
919 static int
920 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
921 {
922         struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
923
924         if (rxq->sze != NULL) {
925                 szedata_close(rxq->sze);
926                 rxq->sze = NULL;
927         }
928
929         dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
930         return 0;
931 }
932
933 static int
934 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
935 {
936         struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
937         int ret;
938         struct pmd_internals *internals = (struct pmd_internals *)
939                 dev->data->dev_private;
940
941         if (txq->sze == NULL) {
942                 uint32_t rx = 0;
943                 uint32_t tx = 1 << txq->tx_channel;
944                 txq->sze = szedata_open(internals->sze_dev);
945                 if (txq->sze == NULL)
946                         return -EINVAL;
947                 ret = szedata_subscribe3(txq->sze, &rx, &tx);
948                 if (ret != 0 || tx == 0)
949                         goto err;
950         }
951
952         ret = szedata_start(txq->sze);
953         if (ret != 0)
954                 goto err;
955         dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
956         return 0;
957
958 err:
959         szedata_close(txq->sze);
960         txq->sze = NULL;
961         return -EINVAL;
962 }
963
964 static int
965 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
966 {
967         struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
968
969         if (txq->sze != NULL) {
970                 szedata_close(txq->sze);
971                 txq->sze = NULL;
972         }
973
974         dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
975         return 0;
976 }
977
978 static int
979 eth_dev_start(struct rte_eth_dev *dev)
980 {
981         int ret;
982         uint16_t i;
983         uint16_t nb_rx = dev->data->nb_rx_queues;
984         uint16_t nb_tx = dev->data->nb_tx_queues;
985
986         for (i = 0; i < nb_rx; i++) {
987                 ret = eth_rx_queue_start(dev, i);
988                 if (ret != 0)
989                         goto err_rx;
990         }
991
992         for (i = 0; i < nb_tx; i++) {
993                 ret = eth_tx_queue_start(dev, i);
994                 if (ret != 0)
995                         goto err_tx;
996         }
997
998         return 0;
999
1000 err_tx:
1001         for (i = 0; i < nb_tx; i++)
1002                 eth_tx_queue_stop(dev, i);
1003 err_rx:
1004         for (i = 0; i < nb_rx; i++)
1005                 eth_rx_queue_stop(dev, i);
1006         return ret;
1007 }
1008
1009 static void
1010 eth_dev_stop(struct rte_eth_dev *dev)
1011 {
1012         uint16_t i;
1013         uint16_t nb_rx = dev->data->nb_rx_queues;
1014         uint16_t nb_tx = dev->data->nb_tx_queues;
1015
1016         for (i = 0; i < nb_tx; i++)
1017                 eth_tx_queue_stop(dev, i);
1018
1019         for (i = 0; i < nb_rx; i++)
1020                 eth_rx_queue_stop(dev, i);
1021 }
1022
1023 static int
1024 eth_dev_configure(struct rte_eth_dev *dev)
1025 {
1026         struct rte_eth_dev_data *data = dev->data;
1027         if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
1028                 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1029                 data->scattered_rx = 1;
1030         } else {
1031                 dev->rx_pkt_burst = eth_szedata2_rx;
1032                 data->scattered_rx = 0;
1033         }
1034         return 0;
1035 }
1036
1037 static void
1038 eth_dev_info(struct rte_eth_dev *dev,
1039                 struct rte_eth_dev_info *dev_info)
1040 {
1041         struct pmd_internals *internals = dev->data->dev_private;
1042
1043         dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1044         dev_info->if_index = 0;
1045         dev_info->max_mac_addrs = 1;
1046         dev_info->max_rx_pktlen = (uint32_t)-1;
1047         dev_info->max_rx_queues = internals->max_rx_queues;
1048         dev_info->max_tx_queues = internals->max_tx_queues;
1049         dev_info->min_rx_bufsize = 0;
1050         dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
1051         dev_info->tx_offload_capa = 0;
1052         dev_info->rx_queue_offload_capa = 0;
1053         dev_info->tx_queue_offload_capa = 0;
1054         dev_info->speed_capa = ETH_LINK_SPEED_100G;
1055 }
1056
1057 static int
1058 eth_stats_get(struct rte_eth_dev *dev,
1059                 struct rte_eth_stats *stats)
1060 {
1061         uint16_t i;
1062         uint16_t nb_rx = dev->data->nb_rx_queues;
1063         uint16_t nb_tx = dev->data->nb_tx_queues;
1064         uint64_t rx_total = 0;
1065         uint64_t tx_total = 0;
1066         uint64_t tx_err_total = 0;
1067         uint64_t rx_total_bytes = 0;
1068         uint64_t tx_total_bytes = 0;
1069
1070         for (i = 0; i < nb_rx; i++) {
1071                 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1072
1073                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1074                         stats->q_ipackets[i] = rxq->rx_pkts;
1075                         stats->q_ibytes[i] = rxq->rx_bytes;
1076                 }
1077                 rx_total += rxq->rx_pkts;
1078                 rx_total_bytes += rxq->rx_bytes;
1079         }
1080
1081         for (i = 0; i < nb_tx; i++) {
1082                 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1083
1084                 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1085                         stats->q_opackets[i] = txq->tx_pkts;
1086                         stats->q_obytes[i] = txq->tx_bytes;
1087                         stats->q_errors[i] = txq->err_pkts;
1088                 }
1089                 tx_total += txq->tx_pkts;
1090                 tx_total_bytes += txq->tx_bytes;
1091                 tx_err_total += txq->err_pkts;
1092         }
1093
1094         stats->ipackets = rx_total;
1095         stats->opackets = tx_total;
1096         stats->ibytes = rx_total_bytes;
1097         stats->obytes = tx_total_bytes;
1098         stats->oerrors = tx_err_total;
1099         stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1100
1101         return 0;
1102 }
1103
1104 static void
1105 eth_stats_reset(struct rte_eth_dev *dev)
1106 {
1107         uint16_t i;
1108         uint16_t nb_rx = dev->data->nb_rx_queues;
1109         uint16_t nb_tx = dev->data->nb_tx_queues;
1110
1111         for (i = 0; i < nb_rx; i++) {
1112                 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1113                 rxq->rx_pkts = 0;
1114                 rxq->rx_bytes = 0;
1115                 rxq->err_pkts = 0;
1116         }
1117         for (i = 0; i < nb_tx; i++) {
1118                 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1119                 txq->tx_pkts = 0;
1120                 txq->tx_bytes = 0;
1121                 txq->err_pkts = 0;
1122         }
1123 }
1124
1125 static void
1126 eth_rx_queue_release(void *q)
1127 {
1128         struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
1129
1130         if (rxq != NULL) {
1131                 if (rxq->sze != NULL)
1132                         szedata_close(rxq->sze);
1133                 rte_free(rxq);
1134         }
1135 }
1136
1137 static void
1138 eth_tx_queue_release(void *q)
1139 {
1140         struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
1141
1142         if (txq != NULL) {
1143                 if (txq->sze != NULL)
1144                         szedata_close(txq->sze);
1145                 rte_free(txq);
1146         }
1147 }
1148
1149 static void
1150 eth_dev_close(struct rte_eth_dev *dev)
1151 {
1152         uint16_t i;
1153         uint16_t nb_rx = dev->data->nb_rx_queues;
1154         uint16_t nb_tx = dev->data->nb_tx_queues;
1155
1156         eth_dev_stop(dev);
1157
1158         for (i = 0; i < nb_rx; i++) {
1159                 eth_rx_queue_release(dev->data->rx_queues[i]);
1160                 dev->data->rx_queues[i] = NULL;
1161         }
1162         dev->data->nb_rx_queues = 0;
1163         for (i = 0; i < nb_tx; i++) {
1164                 eth_tx_queue_release(dev->data->tx_queues[i]);
1165                 dev->data->tx_queues[i] = NULL;
1166         }
1167         dev->data->nb_tx_queues = 0;
1168 }
1169
1170 /**
1171  * Function takes value from first IBUF status register.
1172  * Values in IBUF and OBUF should be same.
1173  *
1174  * @param internals
1175  *     Pointer to device private structure.
1176  * @return
1177  *     Link speed constant.
1178  */
1179 static inline enum szedata2_link_speed
1180 get_link_speed(const struct pmd_internals *internals)
1181 {
1182         const volatile struct szedata2_ibuf *ibuf =
1183                 ibuf_ptr_by_index(internals->pci_rsc, 0);
1184         uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
1185         switch (speed) {
1186         case 0x03:
1187                 return SZEDATA2_LINK_SPEED_10G;
1188         case 0x04:
1189                 return SZEDATA2_LINK_SPEED_40G;
1190         case 0x05:
1191                 return SZEDATA2_LINK_SPEED_100G;
1192         default:
1193                 return SZEDATA2_LINK_SPEED_DEFAULT;
1194         }
1195 }
1196
1197 static int
1198 eth_link_update(struct rte_eth_dev *dev,
1199                 int wait_to_complete __rte_unused)
1200 {
1201         struct rte_eth_link link;
1202         struct pmd_internals *internals = (struct pmd_internals *)
1203                 dev->data->dev_private;
1204         const volatile struct szedata2_ibuf *ibuf;
1205         uint32_t i;
1206         bool link_is_up = false;
1207
1208         memset(&link, 0, sizeof(link));
1209
1210         switch (get_link_speed(internals)) {
1211         case SZEDATA2_LINK_SPEED_10G:
1212                 link.link_speed = ETH_SPEED_NUM_10G;
1213                 break;
1214         case SZEDATA2_LINK_SPEED_40G:
1215                 link.link_speed = ETH_SPEED_NUM_40G;
1216                 break;
1217         case SZEDATA2_LINK_SPEED_100G:
1218                 link.link_speed = ETH_SPEED_NUM_100G;
1219                 break;
1220         default:
1221                 link.link_speed = ETH_SPEED_NUM_10G;
1222                 break;
1223         }
1224
1225         /* szedata2 uses only full duplex */
1226         link.link_duplex = ETH_LINK_FULL_DUPLEX;
1227
1228         for (i = 0; i < szedata2_ibuf_count; i++) {
1229                 ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
1230                 /*
1231                  * Link is considered up if at least one ibuf is enabled
1232                  * and up.
1233                  */
1234                 if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
1235                         link_is_up = true;
1236                         break;
1237                 }
1238         }
1239
1240         link.link_status = link_is_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1241
1242         link.link_autoneg = ETH_LINK_FIXED;
1243
1244         rte_eth_linkstatus_set(dev, &link);
1245         return 0;
1246 }
1247
1248 static int
1249 eth_dev_set_link_up(struct rte_eth_dev *dev)
1250 {
1251         struct pmd_internals *internals = (struct pmd_internals *)
1252                 dev->data->dev_private;
1253         uint32_t i;
1254
1255         for (i = 0; i < szedata2_ibuf_count; i++)
1256                 ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
1257         for (i = 0; i < szedata2_obuf_count; i++)
1258                 obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
1259         return 0;
1260 }
1261
1262 static int
1263 eth_dev_set_link_down(struct rte_eth_dev *dev)
1264 {
1265         struct pmd_internals *internals = (struct pmd_internals *)
1266                 dev->data->dev_private;
1267         uint32_t i;
1268
1269         for (i = 0; i < szedata2_ibuf_count; i++)
1270                 ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
1271         for (i = 0; i < szedata2_obuf_count; i++)
1272                 obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
1273         return 0;
1274 }
1275
1276 static int
1277 eth_rx_queue_setup(struct rte_eth_dev *dev,
1278                 uint16_t rx_queue_id,
1279                 uint16_t nb_rx_desc __rte_unused,
1280                 unsigned int socket_id,
1281                 const struct rte_eth_rxconf *rx_conf __rte_unused,
1282                 struct rte_mempool *mb_pool)
1283 {
1284         struct pmd_internals *internals = dev->data->dev_private;
1285         struct szedata2_rx_queue *rxq;
1286         int ret;
1287         uint32_t rx = 1 << rx_queue_id;
1288         uint32_t tx = 0;
1289
1290         if (dev->data->rx_queues[rx_queue_id] != NULL) {
1291                 eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
1292                 dev->data->rx_queues[rx_queue_id] = NULL;
1293         }
1294
1295         rxq = rte_zmalloc_socket("szedata2 rx queue",
1296                         sizeof(struct szedata2_rx_queue),
1297                         RTE_CACHE_LINE_SIZE, socket_id);
1298         if (rxq == NULL) {
1299                 RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for rx queue id "
1300                                 "%" PRIu16 "!\n", rx_queue_id);
1301                 return -ENOMEM;
1302         }
1303
1304         rxq->priv = internals;
1305         rxq->sze = szedata_open(internals->sze_dev);
1306         if (rxq->sze == NULL) {
1307                 RTE_LOG(ERR, PMD, "szedata_open() failed for rx queue id "
1308                                 "%" PRIu16 "!\n", rx_queue_id);
1309                 eth_rx_queue_release(rxq);
1310                 return -EINVAL;
1311         }
1312         ret = szedata_subscribe3(rxq->sze, &rx, &tx);
1313         if (ret != 0 || rx == 0) {
1314                 RTE_LOG(ERR, PMD, "szedata_subscribe3() failed for rx queue id "
1315                                 "%" PRIu16 "!\n", rx_queue_id);
1316                 eth_rx_queue_release(rxq);
1317                 return -EINVAL;
1318         }
1319         rxq->rx_channel = rx_queue_id;
1320         rxq->in_port = dev->data->port_id;
1321         rxq->mb_pool = mb_pool;
1322         rxq->rx_pkts = 0;
1323         rxq->rx_bytes = 0;
1324         rxq->err_pkts = 0;
1325
1326         dev->data->rx_queues[rx_queue_id] = rxq;
1327
1328         RTE_LOG(DEBUG, PMD, "Configured rx queue id %" PRIu16 " on socket "
1329                         "%u.\n", rx_queue_id, socket_id);
1330
1331         return 0;
1332 }
1333
1334 static int
1335 eth_tx_queue_setup(struct rte_eth_dev *dev,
1336                 uint16_t tx_queue_id,
1337                 uint16_t nb_tx_desc __rte_unused,
1338                 unsigned int socket_id,
1339                 const struct rte_eth_txconf *tx_conf __rte_unused)
1340 {
1341         struct pmd_internals *internals = dev->data->dev_private;
1342         struct szedata2_tx_queue *txq;
1343         int ret;
1344         uint32_t rx = 0;
1345         uint32_t tx = 1 << tx_queue_id;
1346
1347         if (dev->data->tx_queues[tx_queue_id] != NULL) {
1348                 eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
1349                 dev->data->tx_queues[tx_queue_id] = NULL;
1350         }
1351
1352         txq = rte_zmalloc_socket("szedata2 tx queue",
1353                         sizeof(struct szedata2_tx_queue),
1354                         RTE_CACHE_LINE_SIZE, socket_id);
1355         if (txq == NULL) {
1356                 RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for tx queue id "
1357                                 "%" PRIu16 "!\n", tx_queue_id);
1358                 return -ENOMEM;
1359         }
1360
1361         txq->priv = internals;
1362         txq->sze = szedata_open(internals->sze_dev);
1363         if (txq->sze == NULL) {
1364                 RTE_LOG(ERR, PMD, "szedata_open() failed for tx queue id "
1365                                 "%" PRIu16 "!\n", tx_queue_id);
1366                 eth_tx_queue_release(txq);
1367                 return -EINVAL;
1368         }
1369         ret = szedata_subscribe3(txq->sze, &rx, &tx);
1370         if (ret != 0 || tx == 0) {
1371                 RTE_LOG(ERR, PMD, "szedata_subscribe3() failed for tx queue id "
1372                                 "%" PRIu16 "!\n", tx_queue_id);
1373                 eth_tx_queue_release(txq);
1374                 return -EINVAL;
1375         }
1376         txq->tx_channel = tx_queue_id;
1377         txq->tx_pkts = 0;
1378         txq->tx_bytes = 0;
1379         txq->err_pkts = 0;
1380
1381         dev->data->tx_queues[tx_queue_id] = txq;
1382
1383         RTE_LOG(DEBUG, PMD, "Configured tx queue id %" PRIu16 " on socket "
1384                         "%u.\n", tx_queue_id, socket_id);
1385
1386         return 0;
1387 }
1388
/*
 * MAC address set callback — intentionally a no-op: this driver does not
 * program a MAC address anywhere in the visible code path.
 */
static void
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
		struct ether_addr *mac_addr __rte_unused)
{
}
1394
1395 static void
1396 eth_promiscuous_enable(struct rte_eth_dev *dev)
1397 {
1398         struct pmd_internals *internals = (struct pmd_internals *)
1399                 dev->data->dev_private;
1400         uint32_t i;
1401
1402         for (i = 0; i < szedata2_ibuf_count; i++) {
1403                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1404                                 SZEDATA2_MAC_CHMODE_PROMISC);
1405         }
1406 }
1407
1408 static void
1409 eth_promiscuous_disable(struct rte_eth_dev *dev)
1410 {
1411         struct pmd_internals *internals = (struct pmd_internals *)
1412                 dev->data->dev_private;
1413         uint32_t i;
1414
1415         for (i = 0; i < szedata2_ibuf_count; i++) {
1416                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1417                                 SZEDATA2_MAC_CHMODE_ONLY_VALID);
1418         }
1419 }
1420
1421 static void
1422 eth_allmulticast_enable(struct rte_eth_dev *dev)
1423 {
1424         struct pmd_internals *internals = (struct pmd_internals *)
1425                 dev->data->dev_private;
1426         uint32_t i;
1427
1428         for (i = 0; i < szedata2_ibuf_count; i++) {
1429                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1430                                 SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
1431         }
1432 }
1433
1434 static void
1435 eth_allmulticast_disable(struct rte_eth_dev *dev)
1436 {
1437         struct pmd_internals *internals = (struct pmd_internals *)
1438                 dev->data->dev_private;
1439         uint32_t i;
1440
1441         for (i = 0; i < szedata2_ibuf_count; i++) {
1442                 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1443                                 SZEDATA2_MAC_CHMODE_ONLY_VALID);
1444         }
1445 }
1446
/* ethdev callback table wiring this PMD into the rte_ethdev API. */
static const struct eth_dev_ops ops = {
	.dev_start          = eth_dev_start,
	.dev_stop           = eth_dev_stop,
	.dev_set_link_up    = eth_dev_set_link_up,
	.dev_set_link_down  = eth_dev_set_link_down,
	.dev_close          = eth_dev_close,
	.dev_configure      = eth_dev_configure,
	.dev_infos_get      = eth_dev_info,
	.promiscuous_enable   = eth_promiscuous_enable,
	.promiscuous_disable  = eth_promiscuous_disable,
	.allmulticast_enable  = eth_allmulticast_enable,
	.allmulticast_disable = eth_allmulticast_disable,
	.rx_queue_start     = eth_rx_queue_start,
	.rx_queue_stop      = eth_rx_queue_stop,
	.tx_queue_start     = eth_tx_queue_start,
	.tx_queue_stop      = eth_tx_queue_stop,
	.rx_queue_setup     = eth_rx_queue_setup,
	.tx_queue_setup     = eth_tx_queue_setup,
	.rx_queue_release   = eth_rx_queue_release,
	.tx_queue_release   = eth_tx_queue_release,
	.link_update        = eth_link_update,
	.stats_get          = eth_stats_get,
	.stats_reset        = eth_stats_reset,
	.mac_addr_set       = eth_mac_addr_set,
};
1472
1473 /*
1474  * This function goes through sysfs and looks for an index of szedata2
1475  * device file (/dev/szedataIIX, where X is the index).
1476  *
1477  * @return
1478  *           0 on success
1479  *          -1 on error
1480  */
1481 static int
1482 get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
1483 {
1484         DIR *dir;
1485         struct dirent *entry;
1486         int ret;
1487         uint32_t tmp_index;
1488         FILE *fd;
1489         char pcislot_path[PATH_MAX];
1490         uint32_t domain;
1491         uint8_t bus;
1492         uint8_t devid;
1493         uint8_t function;
1494
1495         dir = opendir("/sys/class/combo");
1496         if (dir == NULL)
1497                 return -1;
1498
1499         /*
1500          * Iterate through all combosixX directories.
1501          * When the value in /sys/class/combo/combosixX/device/pcislot
1502          * file is the location of the ethernet device dev, "X" is the
1503          * index of the device.
1504          */
1505         while ((entry = readdir(dir)) != NULL) {
1506                 ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
1507                 if (ret != 1)
1508                         continue;
1509
1510                 snprintf(pcislot_path, PATH_MAX,
1511                         "/sys/class/combo/combosix%u/device/pcislot",
1512                         tmp_index);
1513
1514                 fd = fopen(pcislot_path, "r");
1515                 if (fd == NULL)
1516                         continue;
1517
1518                 ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
1519                                 &domain, &bus, &devid, &function);
1520                 fclose(fd);
1521                 if (ret != 4)
1522                         continue;
1523
1524                 if (pcislot_addr->domain == domain &&
1525                                 pcislot_addr->bus == bus &&
1526                                 pcislot_addr->devid == devid &&
1527                                 pcislot_addr->function == function) {
1528                         *index = tmp_index;
1529                         closedir(dir);
1530                         return 0;
1531                 }
1532         }
1533
1534         closedir(dir);
1535         return -1;
1536 }
1537
1538 static int
1539 rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
1540 {
1541         struct rte_eth_dev_data *data = dev->data;
1542         struct pmd_internals *internals = (struct pmd_internals *)
1543                 data->dev_private;
1544         struct szedata *szedata_temp;
1545         int ret;
1546         uint32_t szedata2_index;
1547         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1548         struct rte_pci_addr *pci_addr = &pci_dev->addr;
1549         struct rte_mem_resource *pci_rsc =
1550                 &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
1551         char rsc_filename[PATH_MAX];
1552         void *pci_resource_ptr = NULL;
1553         int fd;
1554
1555         RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n",
1556                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1557                         pci_addr->function);
1558
1559         internals->dev = dev;
1560
1561         /* Get index of szedata2 device file and create path to device file */
1562         ret = get_szedata2_index(pci_addr, &szedata2_index);
1563         if (ret != 0) {
1564                 RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
1565                 return -ENODEV;
1566         }
1567         snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
1568                         szedata2_index);
1569
1570         RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev);
1571
1572         /*
1573          * Get number of available DMA RX and TX channels, which is maximum
1574          * number of queues that can be created and store it in private device
1575          * data structure.
1576          */
1577         szedata_temp = szedata_open(internals->sze_dev);
1578         if (szedata_temp == NULL) {
1579                 RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s",
1580                                 internals->sze_dev);
1581                 return -EINVAL;
1582         }
1583         internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
1584                         SZE2_DIR_RX);
1585         internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
1586                         SZE2_DIR_TX);
1587         szedata_close(szedata_temp);
1588
1589         RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n",
1590                         internals->max_rx_queues, internals->max_tx_queues);
1591
1592         /* Set rx, tx burst functions */
1593         if (data->scattered_rx == 1)
1594                 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1595         else
1596                 dev->rx_pkt_burst = eth_szedata2_rx;
1597         dev->tx_pkt_burst = eth_szedata2_tx;
1598
1599         /* Set function callbacks for Ethernet API */
1600         dev->dev_ops = &ops;
1601
1602         rte_eth_copy_pci_info(dev, pci_dev);
1603
1604         /* mmap pci resource0 file to rte_mem_resource structure */
1605         if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
1606                         0) {
1607                 RTE_LOG(ERR, PMD, "Missing resource%u file\n",
1608                                 PCI_RESOURCE_NUMBER);
1609                 return -EINVAL;
1610         }
1611         snprintf(rsc_filename, PATH_MAX,
1612                 "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
1613                 pci_addr->domain, pci_addr->bus,
1614                 pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
1615         fd = open(rsc_filename, O_RDWR);
1616         if (fd < 0) {
1617                 RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename);
1618                 return -EINVAL;
1619         }
1620
1621         pci_resource_ptr = mmap(0,
1622                         pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
1623                         PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1624         close(fd);
1625         if (pci_resource_ptr == MAP_FAILED) {
1626                 RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
1627                                 rsc_filename, fd);
1628                 return -EINVAL;
1629         }
1630         pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
1631         internals->pci_rsc = pci_rsc;
1632
1633         RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
1634                         "virt addr = %llx\n", PCI_RESOURCE_NUMBER,
1635                         (unsigned long long)pci_rsc->phys_addr,
1636                         (unsigned long long)pci_rsc->len,
1637                         (unsigned long long)pci_rsc->addr);
1638
1639         /* Get link state */
1640         eth_link_update(dev, 0);
1641
1642         /* Allocate space for one mac address */
1643         data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
1644                         RTE_CACHE_LINE_SIZE);
1645         if (data->mac_addrs == NULL) {
1646                 RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
1647                 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1648                        pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1649                 return -EINVAL;
1650         }
1651
1652         ether_addr_copy(&eth_addr, data->mac_addrs);
1653
1654         /* At initial state COMBO card is in promiscuous mode so disable it */
1655         eth_promiscuous_disable(dev);
1656
1657         RTE_LOG(INFO, PMD, "szedata2 device ("
1658                         PCI_PRI_FMT ") successfully initialized\n",
1659                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1660                         pci_addr->function);
1661
1662         return 0;
1663 }
1664
1665 static int
1666 rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
1667 {
1668         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1669         struct rte_pci_addr *pci_addr = &pci_dev->addr;
1670
1671         rte_free(dev->data->mac_addrs);
1672         dev->data->mac_addrs = NULL;
1673         munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1674                pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1675
1676         RTE_LOG(INFO, PMD, "szedata2 device ("
1677                         PCI_PRI_FMT ") successfully uninitialized\n",
1678                         pci_addr->domain, pci_addr->bus, pci_addr->devid,
1679                         pci_addr->function);
1680
1681         return 0;
1682 }
1683
/* PCI IDs of the NetCOPE COMBO cards handled by this driver. */
static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO80G)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO100G)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO100G2)
	},
	{
		/* Sentinel entry: zero vendor_id terminates the table. */
		.vendor_id = 0,
	}
};
1701
1702 static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1703         struct rte_pci_device *pci_dev)
1704 {
1705         return rte_eth_dev_pci_generic_probe(pci_dev,
1706                 sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
1707 }
1708
1709 static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
1710 {
1711         return rte_eth_dev_pci_generic_remove(pci_dev,
1712                 rte_szedata2_eth_dev_uninit);
1713 }
1714
/* PCI driver handle registered with the PCI bus below. */
static struct rte_pci_driver szedata2_eth_driver = {
	.id_table = rte_szedata2_pci_id_table,
	.probe = szedata2_eth_pci_probe,
	.remove = szedata2_eth_pci_remove,
};
1720
/* Register the driver, its PCI ID table and the kernel modules it
 * depends on. */
RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
	"* combo6core & combov3 & szedata2 & szedata2_cv3");