/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2015 - 2016 CESNET
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of CESNET nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdbool.h>
#include <err.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>

#include <libsze2.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_atomic.h>

#include "rte_eth_szedata2.h"
#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)

/**
 * size of szedata2_packet header with alignment
 */
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8

#define RTE_SZEDATA2_DRIVER_NAME "rte_szedata2_pmd"
#define RTE_SZEDATA2_PCI_DRIVER_NAME "rte_szedata2_pmd"

#define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
struct szedata2_rx_queue {
	struct szedata *sze;
	uint8_t rx_channel;
	uint8_t in_port;
	struct rte_mempool *mb_pool;
	volatile uint64_t rx_pkts;
	volatile uint64_t rx_bytes;
	volatile uint64_t err_pkts;
};

struct szedata2_tx_queue {
	struct szedata *sze;
	uint8_t tx_channel;
	volatile uint64_t tx_pkts;
	volatile uint64_t tx_bytes;
	volatile uint64_t err_pkts;
};

struct pmd_internals {
	struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES];
	struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES];
	uint16_t max_rx_queues;
	uint16_t max_tx_queues;
	char sze_dev[PATH_MAX];
};

static struct ether_addr eth_addr = {
	.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};
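/*
 * Layout of one szedata2 RX segment, as inferred from the header parsing in
 * the RX burst functions below (RTE_SZE2_PACKET_HEADER_SIZE and
 * RTE_SZE2_ALIGN8 are presumed to come from rte_eth_szedata2.h):
 *
 *   +-----------------+-----------------+------------ ... -------------+
 *   | seg size (LE16) | hw size (LE16)  | hw data | packet data        |
 *   +-----------------+-----------------+------------ ... -------------+
 *   \_ RTE_SZE2_PACKET_HEADER_SIZE _____/ \_ starts 8 B aligned w/ hdr _/
 *
 * "seg size" covers the whole segment, so the payload length is
 * seg_size - RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size).
 */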
static uint16_t
eth_szedata2_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	struct szedata2_rx_queue *sze_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint16_t sg_size;
	uint16_t hw_size;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	struct szedata *sze = sze_q->sze;
	uint8_t *header_ptr = NULL; /* header of packet */
	uint8_t *packet_ptr1 = NULL;
	uint8_t *packet_ptr2 = NULL;
	uint16_t packet_len1 = 0;
	uint16_t packet_len2 = 0;
	uint16_t hw_data_align;
	uint16_t i;

	if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from szedata2 channel given
	 * by queue and copies the packet data into a newly allocated mbuf
	 * to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);

		if (unlikely(mbuf == NULL))
			break;

		/* get the next sze packet */
		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
				sze->ct_rx_lck->next == NULL) {
			/* unlock old data */
			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
			sze->ct_rx_lck_orig = NULL;
			sze->ct_rx_lck = NULL;
		}

		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
			/* nothing to read, lock new data */
			sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
			sze->ct_rx_lck_orig = sze->ct_rx_lck;

			if (sze->ct_rx_lck == NULL) {
				/* nothing to lock */
				rte_pktmbuf_free(mbuf);
				break;
			}

			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;

			if (!sze->ct_rx_rem_bytes) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
			/*
			 * cut in header
			 * copy parts of header to merge buffer
			 */
			if (sze->ct_rx_lck->next == NULL) {
				rte_pktmbuf_free(mbuf);
				break;
			}

			/* copy first part of header */
			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
					sze->ct_rx_rem_bytes);

			/* copy second part of header */
			sze->ct_rx_lck = sze->ct_rx_lck->next;
			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
					sze->ct_rx_cur_ptr,
					RTE_SZE2_PACKET_HEADER_SIZE -
					sze->ct_rx_rem_bytes);

			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
				RTE_SZE2_PACKET_HEADER_SIZE +
				sze->ct_rx_rem_bytes;

			header_ptr = (uint8_t *)sze->ct_rx_buffer;
		} else {
			/* not cut */
			header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
		}

		sg_size = le16toh(*((uint16_t *)header_ptr));
		hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
		packet_size = sg_size -
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);

		/* checks if packet all right */
		if (!sg_size)
			errx(5, "Zero segsize");

		/* check sg_size and hwsize */
		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
			errx(10, "Hwsize bigger than expected. Segsize: %d, "
					"hwsize: %d", sg_size, hw_size);
		}

		hw_data_align =
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
			RTE_SZE2_PACKET_HEADER_SIZE;

		if (sze->ct_rx_rem_bytes >=
				(uint16_t)(sg_size -
				RTE_SZE2_PACKET_HEADER_SIZE)) {
			/* no cut */
			/* one packet ready - go to another */
			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
			packet_len1 = packet_size;
			packet_ptr2 = NULL;
			packet_len2 = 0;

			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
		} else {
			/* cut in data */
			if (sze->ct_rx_lck->next == NULL) {
				errx(6, "Need \"next\" lock, "
						"but it is missing: %u",
						sze->ct_rx_rem_bytes);
			}

			/* skip hw data */
			if (sze->ct_rx_rem_bytes <= hw_data_align) {
				uint16_t rem_size = hw_data_align -
					sze->ct_rx_rem_bytes;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr =
					(void *)(((uint8_t *)
					(sze->ct_rx_lck->start)) + rem_size);

				packet_ptr1 = sze->ct_rx_cur_ptr;
				packet_len1 = packet_size;
				packet_ptr2 = NULL;
				packet_len2 = 0;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size);
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					rem_size - RTE_SZE2_ALIGN8(packet_size);
			} else {
				/* get pointer and length from first part */
				packet_ptr1 = sze->ct_rx_cur_ptr +
					hw_data_align;
				packet_len1 = sze->ct_rx_rem_bytes -
					hw_data_align;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;

				/* get pointer and length from second part */
				packet_ptr2 = sze->ct_rx_cur_ptr;
				packet_len2 = packet_size - packet_len1;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size) -
					packet_len1;
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					(RTE_SZE2_ALIGN8(packet_size) -
					packet_len1);
			}
		}

		if (unlikely(packet_ptr1 == NULL)) {
			rte_pktmbuf_free(mbuf);
			break;
		}

		/* get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (packet_size <= buf_size) {
			/* sze packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
					packet_ptr1, packet_len1);
			if (packet_ptr2 != NULL) {
				rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
						uint8_t *) + packet_len1),
						packet_ptr2, packet_len2);
			}
			mbuf->data_len = (uint16_t)packet_size;

			mbuf->pkt_len = packet_size;
			mbuf->port = sze_q->in_port;
			bufs[num_rx] = mbuf;
			num_rx++;
			num_bytes += packet_size;
		} else {
			/*
			 * sze packet will not fit in one mbuf,
			 * scattered mode is not enabled, drop packet
			 */
			RTE_LOG(ERR, PMD,
				"SZE segment %d bytes will not fit in one mbuf "
				"(%d bytes), scattered mode is not enabled, "
				"drop packet!!\n",
				packet_size, buf_size);
			rte_pktmbuf_free(mbuf);
		}
	}

	sze_q->rx_pkts += num_rx;
	sze_q->rx_bytes += num_bytes;
	return num_rx;
}
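/*
 * A minimal sketch of how an application drives the burst function above
 * through the generic ethdev API (port_id is hypothetical; port and queue 0
 * are assumed to be configured and started elsewhere):
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	for (uint16_t j = 0; j < nb; j++)
 *		rte_pktmbuf_free(pkts[j]);
 */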
static uint16_t
eth_szedata2_rx_scattered(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	struct szedata2_rx_queue *sze_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint16_t sg_size;
	uint16_t hw_size;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	struct szedata *sze = sze_q->sze;
	uint8_t *header_ptr = NULL; /* header of packet */
	uint8_t *packet_ptr1 = NULL;
	uint8_t *packet_ptr2 = NULL;
	uint16_t packet_len1 = 0;
	uint16_t packet_len2 = 0;
	uint16_t hw_data_align;
	uint16_t i;

	if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
		return 0;

	/*
	 * Reads the given number of packets from szedata2 channel given
	 * by queue and copies the packet data into a newly allocated mbuf
	 * to return.
	 */
	for (i = 0; i < nb_pkts; i++) {
		const struct szedata_lock *ct_rx_lck_backup;
		unsigned int ct_rx_rem_bytes_backup;
		unsigned char *ct_rx_cur_ptr_backup;

		/* get the next sze packet */
		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
				sze->ct_rx_lck->next == NULL) {
			/* unlock old data */
			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
			sze->ct_rx_lck_orig = NULL;
			sze->ct_rx_lck = NULL;
		}

		/*
		 * Store items from sze structure which can be changed
		 * before mbuf allocating. Use these items in case of mbuf
		 * allocating failure.
		 */
		ct_rx_lck_backup = sze->ct_rx_lck;
		ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
		ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;

		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
			/* nothing to read, lock new data */
			sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
			sze->ct_rx_lck_orig = sze->ct_rx_lck;

			/*
			 * Backup items from sze structure must be updated
			 * after locking to contain pointers to new locks.
			 */
			ct_rx_lck_backup = sze->ct_rx_lck;
			ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
			ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;

			if (sze->ct_rx_lck == NULL)
				/* nothing to lock */
				break;

			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;

			if (!sze->ct_rx_rem_bytes)
				break;
		}

		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
			/*
			 * cut in header - copy parts of header to merge buffer
			 */
			if (sze->ct_rx_lck->next == NULL)
				break;

			/* copy first part of header */
			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
					sze->ct_rx_rem_bytes);

			/* copy second part of header */
			sze->ct_rx_lck = sze->ct_rx_lck->next;
			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
					sze->ct_rx_cur_ptr,
					RTE_SZE2_PACKET_HEADER_SIZE -
					sze->ct_rx_rem_bytes);

			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
				RTE_SZE2_PACKET_HEADER_SIZE +
				sze->ct_rx_rem_bytes;

			header_ptr = (uint8_t *)sze->ct_rx_buffer;
		} else {
			/* not cut */
			header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
		}

		sg_size = le16toh(*((uint16_t *)header_ptr));
		hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
		packet_size = sg_size -
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);

		/* checks if packet all right */
		if (!sg_size)
			errx(5, "Zero segsize");

		/* check sg_size and hwsize */
		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
			errx(10, "Hwsize bigger than expected. Segsize: %d, "
					"hwsize: %d", sg_size, hw_size);
		}

		hw_data_align =
			RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
			hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;

		if (sze->ct_rx_rem_bytes >=
				(uint16_t)(sg_size -
				RTE_SZE2_PACKET_HEADER_SIZE)) {
			/* no cut */
			/* one packet ready - go to another */
			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
			packet_len1 = packet_size;
			packet_ptr2 = NULL;
			packet_len2 = 0;

			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
		} else {
			/* cut in data */
			if (sze->ct_rx_lck->next == NULL) {
				errx(6, "Need \"next\" lock, but it is "
						"missing: %u",
						sze->ct_rx_rem_bytes);
			}

			/* skip hw data */
			if (sze->ct_rx_rem_bytes <= hw_data_align) {
				uint16_t rem_size = hw_data_align -
					sze->ct_rx_rem_bytes;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr =
					(void *)(((uint8_t *)
					(sze->ct_rx_lck->start)) + rem_size);

				packet_ptr1 = sze->ct_rx_cur_ptr;
				packet_len1 = packet_size;
				packet_ptr2 = NULL;
				packet_len2 = 0;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size);
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					rem_size - RTE_SZE2_ALIGN8(packet_size);
			} else {
				/* get pointer and length from first part */
				packet_ptr1 = sze->ct_rx_cur_ptr +
					hw_data_align;
				packet_len1 = sze->ct_rx_rem_bytes -
					hw_data_align;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;

				/* get pointer and length from second part */
				packet_ptr2 = sze->ct_rx_cur_ptr;
				packet_len2 = packet_size - packet_len1;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size) -
					packet_len1;
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					(RTE_SZE2_ALIGN8(packet_size) -
					packet_len1);
			}
		}

		if (unlikely(packet_ptr1 == NULL))
			break;

		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);

		if (unlikely(mbuf == NULL)) {
			/*
			 * Restore items from sze structure to state after
			 * unlocking (eventually locking).
			 */
			sze->ct_rx_lck = ct_rx_lck_backup;
			sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
			sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
			break;
		}

		/* get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (packet_size <= buf_size) {
			/* sze packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
					packet_ptr1, packet_len1);
			if (packet_ptr2 != NULL) {
				rte_memcpy((void *)
					(rte_pktmbuf_mtod(mbuf, uint8_t *) +
					packet_len1), packet_ptr2, packet_len2);
			}
			mbuf->data_len = (uint16_t)packet_size;
		} else {
			/*
			 * sze packet will not fit in one mbuf,
			 * scatter packet into more mbufs
			 */
			struct rte_mbuf *m = mbuf;
			uint16_t len = rte_pktmbuf_tailroom(mbuf);

			/* copy first part of packet */
			/* fill first mbuf */
			rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
					len);
			packet_len1 -= len;
			packet_ptr1 = ((uint8_t *)packet_ptr1) + len;

			while (packet_len1 > 0) {
				/* allocate a new mbuf */
				m->next = rte_pktmbuf_alloc(sze_q->mb_pool);

				if (unlikely(m->next == NULL)) {
					rte_pktmbuf_free(mbuf);
					/*
					 * Restore items from sze structure
					 * to state after unlocking (eventually
					 * locking).
					 */
					sze->ct_rx_lck = ct_rx_lck_backup;
					sze->ct_rx_rem_bytes =
						ct_rx_rem_bytes_backup;
					sze->ct_rx_cur_ptr =
						ct_rx_cur_ptr_backup;
					goto finish;
				}

				m = m->next;

				len = RTE_MIN(rte_pktmbuf_tailroom(m),
						packet_len1);
				rte_memcpy(rte_pktmbuf_append(mbuf, len),
						packet_ptr1, len);

				(mbuf->nb_segs)++;
				packet_len1 -= len;
				packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
			}

			if (packet_ptr2 != NULL) {
				/* copy second part of packet, if exists */
				/* fill the rest of currently last mbuf */
				len = rte_pktmbuf_tailroom(m);
				rte_memcpy(rte_pktmbuf_append(mbuf, len),
						packet_ptr2, len);
				packet_len2 -= len;
				packet_ptr2 = ((uint8_t *)packet_ptr2) + len;

				while (packet_len2 > 0) {
					/* allocate a new mbuf */
					m->next = rte_pktmbuf_alloc(
							sze_q->mb_pool);

					if (unlikely(m->next == NULL)) {
						rte_pktmbuf_free(mbuf);
						/*
						 * Restore items from sze
						 * structure to state after
						 * unlocking (eventually
						 * locking).
						 */
						sze->ct_rx_lck =
							ct_rx_lck_backup;
						sze->ct_rx_rem_bytes =
							ct_rx_rem_bytes_backup;
						sze->ct_rx_cur_ptr =
							ct_rx_cur_ptr_backup;
						goto finish;
					}

					m = m->next;

					len = RTE_MIN(rte_pktmbuf_tailroom(m),
							packet_len2);
					rte_memcpy(
						rte_pktmbuf_append(mbuf, len),
						packet_ptr2, len);

					(mbuf->nb_segs)++;
					packet_len2 -= len;
					packet_ptr2 = ((uint8_t *)packet_ptr2) +
						len;
				}
			}
		}

		mbuf->pkt_len = packet_size;
		mbuf->port = sze_q->in_port;
		bufs[num_rx] = mbuf;
		num_rx++;
		num_bytes += packet_size;
	}

finish:
	sze_q->rx_pkts += num_rx;
	sze_q->rx_bytes += num_bytes;
	return num_rx;
}
static uint16_t
eth_szedata2_tx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	struct rte_mbuf *mbuf;
	struct szedata2_tx_queue *sze_q = queue;
	uint16_t num_tx = 0;
	uint64_t num_bytes = 0;

	const struct szedata_lock *lck;
	uint32_t lock_size;
	uint32_t lock_size2;
	void *dst;
	uint32_t pkt_len;
	uint32_t hwpkt_len;
	uint32_t unlock_size;
	uint32_t rem_len;
	uint16_t mbuf_segs;
	uint16_t pkt_left = nb_pkts;

	if (sze_q->sze == NULL || nb_pkts == 0)
		return 0;

	while (pkt_left > 0) {
		unlock_size = 0;
		lck = szedata_tx_lock_data(sze_q->sze,
			RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
			sze_q->tx_channel);
		if (lck == NULL)
			continue;

		dst = lck->start;
		lock_size = lck->len;
		lock_size2 = lck->next ? lck->next->len : 0;

next_packet:
		mbuf = bufs[nb_pkts - pkt_left];

		pkt_len = mbuf->pkt_len;
		mbuf_segs = mbuf->nb_segs;

		hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
			RTE_SZE2_ALIGN8(pkt_len);

		if (lock_size + lock_size2 < hwpkt_len) {
			szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
			continue;
		}

		num_bytes += pkt_len;

		if (lock_size > hwpkt_len) {
			void *tmp_dst;

			/* write packet length at first 2 bytes in 8B header */
			*((uint16_t *)dst) = htole16(
					RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
					pkt_len);
			*(((uint16_t *)dst) + 1) = htole16(0);

			/* copy packet from mbuf */
			tmp_dst = ((uint8_t *)(dst)) +
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
			if (mbuf_segs == 1) {
				/*
				 * non-scattered packet,
				 * transmit from one mbuf
				 */
				rte_memcpy(tmp_dst,
					rte_pktmbuf_mtod(mbuf, const void *),
					pkt_len);
			} else {
				/* scattered packet, transmit from more mbufs */
				struct rte_mbuf *m = mbuf;
				while (m) {
					rte_memcpy(tmp_dst,
						rte_pktmbuf_mtod(m,
						const void *),
						m->data_len);
					tmp_dst = ((uint8_t *)(tmp_dst)) +
						m->data_len;
					m = m->next;
				}
			}

			dst = ((uint8_t *)dst) + hwpkt_len;
			unlock_size += hwpkt_len;
			lock_size -= hwpkt_len;

			rte_pktmbuf_free(mbuf);
			num_tx++;
			pkt_left--;
			if (pkt_left == 0) {
				szedata_tx_unlock_data(sze_q->sze, lck,
					unlock_size);
				break;
			}
			goto next_packet;
		} else if (lock_size + lock_size2 >= hwpkt_len) {
			void *tmp_dst;
			uint16_t write_len;

			/* write packet length at first 2 bytes in 8B header */
			*((uint16_t *)dst) =
				htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
				pkt_len);
			*(((uint16_t *)dst) + 1) = htole16(0);

			/*
			 * If the raw packet (pkt_len) is smaller than lock_size
			 * get the correct length for memcpy
			 */
			write_len =
				pkt_len < lock_size -
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
				pkt_len :
				lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;

			rem_len = hwpkt_len - lock_size;

			tmp_dst = ((uint8_t *)(dst)) +
				RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
			if (mbuf_segs == 1) {
				/*
				 * non-scattered packet,
				 * transmit from one mbuf
				 */
				/* copy part of packet to first area */
				rte_memcpy(tmp_dst,
					rte_pktmbuf_mtod(mbuf, const void *),
					write_len);

				dst = lck->next->start;

				/* copy part of packet to second area */
				rte_memcpy(dst,
					(const void *)(rte_pktmbuf_mtod(mbuf,
					uint8_t *) +
					write_len), pkt_len - write_len);
			} else {
				/* scattered packet, transmit from more mbufs */
				struct rte_mbuf *m = mbuf;
				uint16_t written = 0;
				uint16_t to_write = 0;
				bool new_mbuf = true;
				uint16_t write_off = 0;

				/* copy part of packet to first area */
				while (m && written < write_len) {
					to_write = RTE_MIN(m->data_len,
						write_len - written);
					rte_memcpy(tmp_dst,
						rte_pktmbuf_mtod(m,
						const void *),
						to_write);

					tmp_dst = ((uint8_t *)(tmp_dst)) +
						to_write;
					if (m->data_len <= write_len -
							written) {
						m = m->next;
						new_mbuf = true;
					} else {
						new_mbuf = false;
					}
					written += to_write;
				}

				dst = lck->next->start;

				tmp_dst = dst;
				written = 0;
				write_off = new_mbuf ? 0 : to_write;

				/* copy part of packet to second area */
				while (m && written < pkt_len - write_len) {
					rte_memcpy(tmp_dst, (const void *)
						(rte_pktmbuf_mtod(m,
						uint8_t *) + write_off),
						m->data_len - write_off);

					tmp_dst = ((uint8_t *)(tmp_dst)) +
						(m->data_len - write_off);
					written += m->data_len - write_off;
					m = m->next;
					write_off = 0;
				}
			}

			dst = ((uint8_t *)dst) + rem_len;
			unlock_size += hwpkt_len;
			lock_size = lock_size2 - rem_len;
			lock_size2 = 0;

			rte_pktmbuf_free(mbuf);
			num_tx++;
			pkt_left--;
		}
		szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
	}

	sze_q->tx_pkts += num_tx;
	sze_q->err_pkts += nb_pkts - num_tx;
	sze_q->tx_bytes += num_bytes;
	return num_tx;
}
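/*
 * Matching caller-side sketch for the TX burst above, via the generic API
 * (hypothetical port_id; nb/pkts as in the RX sketch earlier):
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, 0, pkts, nb);
 *	while (sent < nb)
 *		rte_pktmbuf_free(pkts[sent++]);	(drop what was not sent)
 */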
static int
eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
{
	struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
	int ret;
	struct pmd_internals *internals = (struct pmd_internals *)
		dev->data->dev_private;

	if (rxq->sze == NULL) {
		uint32_t rx = 1 << rxq->rx_channel;
		uint32_t tx = 0;
		rxq->sze = szedata_open(internals->sze_dev);
		if (rxq->sze == NULL)
			return -EINVAL;
		ret = szedata_subscribe3(rxq->sze, &rx, &tx);
		if (ret != 0 || rx == 0)
			goto err;
		ret = szedata_start(rxq->sze);
		if (ret != 0)
			goto err;
	}

	dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

err:
	szedata_close(rxq->sze);
	rxq->sze = NULL;
	return -EINVAL;
}
static int
eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
{
	struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];

	if (rxq->sze != NULL) {
		szedata_close(rxq->sze);
		rxq->sze = NULL;
	}

	dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
static int
eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
{
	struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
	int ret;
	struct pmd_internals *internals = (struct pmd_internals *)
		dev->data->dev_private;

	if (txq->sze == NULL) {
		uint32_t rx = 0;
		uint32_t tx = 1 << txq->tx_channel;
		txq->sze = szedata_open(internals->sze_dev);
		if (txq->sze == NULL)
			return -EINVAL;
		ret = szedata_subscribe3(txq->sze, &rx, &tx);
		if (ret != 0 || tx == 0)
			goto err;
		ret = szedata_start(txq->sze);
		if (ret != 0)
			goto err;
	}

	dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
	return 0;

err:
	szedata_close(txq->sze);
	txq->sze = NULL;
	return -EINVAL;
}
static int
eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
{
	struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];

	if (txq->sze != NULL) {
		szedata_close(txq->sze);
		txq->sze = NULL;
	}

	dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	return 0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;

	for (i = 0; i < nb_rx; i++) {
		ret = eth_rx_queue_start(dev, i);
		if (ret != 0)
			goto err_rx;
	}

	for (i = 0; i < nb_tx; i++) {
		ret = eth_tx_queue_start(dev, i);
		if (ret != 0)
			goto err_tx;
	}

	return 0;

err_tx:
	for (i = 0; i < nb_tx; i++)
		eth_tx_queue_stop(dev, i);
err_rx:
	for (i = 0; i < nb_rx; i++)
		eth_rx_queue_stop(dev, i);
	return ret;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;

	for (i = 0; i < nb_tx; i++)
		eth_tx_queue_stop(dev, i);

	for (i = 0; i < nb_rx; i++)
		eth_rx_queue_stop(dev, i);
}
static int
eth_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	if (data->dev_conf.rxmode.enable_scatter == 1) {
		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
		data->scattered_rx = 1;
	} else {
		dev->rx_pkt_burst = eth_szedata2_rx;
		data->scattered_rx = 0;
	}
	return 0;
}
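/*
 * The scattered RX path is selected purely from the configuration the
 * application hands to rte_eth_dev_configure(); a minimal sketch with a
 * hypothetical port_id and one RX/TX queue:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .enable_scatter = 1 },
 *	};
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 */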
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;
	dev_info->if_index = 0;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = internals->max_rx_queues;
	dev_info->max_tx_queues = internals->max_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->speed_capa = ETH_LINK_SPEED_100G;
}
static void
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;
	uint64_t rx_total = 0;
	uint64_t tx_total = 0;
	uint64_t tx_err_total = 0;
	uint64_t rx_total_bytes = 0;
	uint64_t tx_total_bytes = 0;
	const struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_rx; i++) {
		stats->q_ipackets[i] = internals->rx_queue[i].rx_pkts;
		stats->q_ibytes[i] = internals->rx_queue[i].rx_bytes;
		rx_total += stats->q_ipackets[i];
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS && i < nb_tx; i++) {
		stats->q_opackets[i] = internals->tx_queue[i].tx_pkts;
		stats->q_obytes[i] = internals->tx_queue[i].tx_bytes;
		stats->q_errors[i] = internals->tx_queue[i].err_pkts;
		tx_total += stats->q_opackets[i];
		tx_total_bytes += stats->q_obytes[i];
		tx_err_total += stats->q_errors[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;
	stats->oerrors = tx_err_total;
}
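/*
 * Per-queue counters are exported only for the first
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues; the aggregate fields always cover all
 * queues. A caller-side sketch (hypothetical port_id):
 *
 *	struct rte_eth_stats stats;
 *	rte_eth_stats_get(port_id, &stats);
 *	printf("rx %" PRIu64 " pkts, %" PRIu64 " bytes\n",
 *		stats.ipackets, stats.ibytes);
 */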
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < nb_rx; i++) {
		internals->rx_queue[i].rx_pkts = 0;
		internals->rx_queue[i].rx_bytes = 0;
		internals->rx_queue[i].err_pkts = 0;
	}
	for (i = 0; i < nb_tx; i++) {
		internals->tx_queue[i].tx_pkts = 0;
		internals->tx_queue[i].tx_bytes = 0;
		internals->tx_queue[i].err_pkts = 0;
	}
}
static void
eth_rx_queue_release(void *q)
{
	struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
	if (rxq->sze != NULL) {
		szedata_close(rxq->sze);
		rxq->sze = NULL;
	}
}

static void
eth_tx_queue_release(void *q)
{
	struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
	if (txq->sze != NULL) {
		szedata_close(txq->sze);
		txq->sze = NULL;
	}
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	uint16_t i;
	uint16_t nb_rx = dev->data->nb_rx_queues;
	uint16_t nb_tx = dev->data->nb_tx_queues;

	eth_dev_stop(dev);

	for (i = 0; i < nb_rx; i++) {
		eth_rx_queue_release(dev->data->rx_queues[i]);
		dev->data->rx_queues[i] = NULL;
	}
	dev->data->nb_rx_queues = 0;
	for (i = 0; i < nb_tx; i++) {
		eth_tx_queue_release(dev->data->tx_queues[i]);
		dev->data->tx_queues[i] = NULL;
	}
	dev->data->nb_tx_queues = 0;
}
static int
eth_link_update(struct rte_eth_dev *dev,
		int wait_to_complete __rte_unused)
{
	struct rte_eth_link link;
	struct rte_eth_link *link_ptr = &link;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);

	switch (cgmii_link_speed(ibuf)) {
	case SZEDATA2_LINK_SPEED_10G:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case SZEDATA2_LINK_SPEED_40G:
		link.link_speed = ETH_SPEED_NUM_40G;
		break;
	case SZEDATA2_LINK_SPEED_100G:
		link.link_speed = ETH_SPEED_NUM_100G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	}

	/* szedata2 uses only full duplex */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	link.link_status = (cgmii_ibuf_is_enabled(ibuf) &&
			cgmii_ibuf_is_link_up(ibuf)) ?
			ETH_LINK_UP : ETH_LINK_DOWN;

	link.link_autoneg = ETH_LINK_SPEED_FIXED;

	rte_atomic64_cmpset((uint64_t *)dev_link, *(uint64_t *)dev_link,
			*(uint64_t *)link_ptr);

	return 0;
}
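/*
 * Note: dev->data->dev_link is published with a single 64-bit compare-and-set
 * so that concurrent readers never observe a half-updated link state; this
 * relies on struct rte_eth_link fitting into 64 bits, which the casts above
 * assume.
 */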
static int
eth_dev_set_link_up(struct rte_eth_dev *dev)
{
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);
	volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_OBUF_BASE_OFF,
			volatile struct szedata2_cgmii_obuf *);

	cgmii_ibuf_enable(ibuf);
	cgmii_obuf_enable(obuf);
	return 0;
}

static int
eth_dev_set_link_down(struct rte_eth_dev *dev)
{
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);
	volatile struct szedata2_cgmii_obuf *obuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_OBUF_BASE_OFF,
			volatile struct szedata2_cgmii_obuf *);

	cgmii_ibuf_disable(ibuf);
	cgmii_obuf_disable(obuf);
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct szedata2_rx_queue *rxq = &internals->rx_queue[rx_queue_id];
	int ret;
	uint32_t rx = 1 << rx_queue_id;
	uint32_t tx = 0;

	rxq->sze = szedata_open(internals->sze_dev);
	if (rxq->sze == NULL)
		return -EINVAL;
	ret = szedata_subscribe3(rxq->sze, &rx, &tx);
	if (ret != 0 || rx == 0) {
		szedata_close(rxq->sze);
		rxq->sze = NULL;
		return -EINVAL;
	}
	rxq->rx_channel = rx_queue_id;
	rxq->in_port = dev->data->port_id;
	rxq->mb_pool = mb_pool;
	rxq->rx_pkts = 0;
	rxq->rx_bytes = 0;
	rxq->err_pkts = 0;

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;
}
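/*
 * From the application side, a queue served by this function is typically
 * set up as in the sketch below (hypothetical names; pool sizing is
 * application specific, and nb_rx_desc is ignored by this PMD):
 *
 *	struct rte_mempool *pool = rte_pktmbuf_pool_create("rx_pool",
 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, pool);
 */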
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct szedata2_tx_queue *txq = &internals->tx_queue[tx_queue_id];
	int ret;
	uint32_t rx = 0;
	uint32_t tx = 1 << tx_queue_id;

	txq->sze = szedata_open(internals->sze_dev);
	if (txq->sze == NULL)
		return -EINVAL;
	ret = szedata_subscribe3(txq->sze, &rx, &tx);
	if (ret != 0 || tx == 0) {
		szedata_close(txq->sze);
		txq->sze = NULL;
		return -EINVAL;
	}
	txq->tx_channel = tx_queue_id;
	txq->tx_pkts = 0;
	txq->tx_bytes = 0;
	txq->err_pkts = 0;

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}
static void
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
		struct ether_addr *mac_addr __rte_unused)
{
}
static void
eth_promiscuous_enable(struct rte_eth_dev *dev)
{
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);
	cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_PROMISC);
}

static void
eth_promiscuous_disable(struct rte_eth_dev *dev)
{
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);
	cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
}

static void
eth_allmulticast_enable(struct rte_eth_dev *dev)
{
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);
	cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
}

static void
eth_allmulticast_disable(struct rte_eth_dev *dev)
{
	volatile struct szedata2_cgmii_ibuf *ibuf = SZEDATA2_PCI_RESOURCE_PTR(
			dev, SZEDATA2_CGMII_IBUF_BASE_OFF,
			volatile struct szedata2_cgmii_ibuf *);
	cgmii_ibuf_mac_mode_write(ibuf, SZEDATA2_MAC_CHMODE_ONLY_VALID);
}
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_set_link_up = eth_dev_set_link_up,
	.dev_set_link_down = eth_dev_set_link_down,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.promiscuous_enable = eth_promiscuous_enable,
	.promiscuous_disable = eth_promiscuous_disable,
	.allmulticast_enable = eth_allmulticast_enable,
	.allmulticast_disable = eth_allmulticast_disable,
	.rx_queue_start = eth_rx_queue_start,
	.rx_queue_stop = eth_rx_queue_stop,
	.tx_queue_start = eth_tx_queue_start,
	.tx_queue_stop = eth_tx_queue_stop,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_rx_queue_release,
	.tx_queue_release = eth_tx_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_set = eth_mac_addr_set,
};
/**
 * This function goes through sysfs and looks for an index of szedata2
 * device file (/dev/szedataIIX, where X is the index).
 *
 * @return
 *   0 on success
 *   -1 on error
 */
static int
get_szedata2_index(struct rte_eth_dev *dev, uint32_t *index)
{
	DIR *dir;
	struct dirent *entry;
	int ret;
	uint32_t tmp_index;
	FILE *fd;
	char pcislot_path[PATH_MAX];
	struct rte_pci_addr pcislot_addr = dev->pci_dev->addr;
	uint32_t domain;
	uint32_t bus;
	uint32_t devid;
	uint32_t function;

	dir = opendir("/sys/class/combo");
	if (dir == NULL)
		return -1;

	/*
	 * Iterate through all combosixX directories.
	 * When the value in /sys/class/combo/combosixX/device/pcislot
	 * file is the location of the ethernet device dev, "X" is the
	 * index of the device.
	 */
	while ((entry = readdir(dir)) != NULL) {
		ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
		if (ret != 1)
			continue;

		snprintf(pcislot_path, PATH_MAX,
				"/sys/class/combo/combosix%u/device/pcislot",
				tmp_index);

		fd = fopen(pcislot_path, "r");
		if (fd == NULL)
			continue;

		/*
		 * Note: the PRIx (printf) macros used with fscanf expand to
		 * plain "x" with glibc, which matches the uint32_t arguments.
		 */
		ret = fscanf(fd,
				"%4" PRIx16 ":%2" PRIx8 ":%2" PRIx8 ".%" PRIx8,
				&domain, &bus, &devid, &function);
		fclose(fd);
		if (ret != 4)
			continue;

		if (pcislot_addr.domain == domain &&
				pcislot_addr.bus == bus &&
				pcislot_addr.devid == devid &&
				pcislot_addr.function == function) {
			*index = tmp_index;
			closedir(dir);
			return 0;
		}
	}

	closedir(dir);
	return -1;
}
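/*
 * The pcislot file is expected to hold a PCI location in the usual
 * "domain:bus:slot.function" text form (e.g. "0000:01:00.0"); the fscanf
 * format string above parses exactly that shape.
 */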
static int
rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *data = dev->data;
	struct pmd_internals *internals = (struct pmd_internals *)
		data->dev_private;
	struct szedata *szedata_temp;
	int ret;
	uint32_t szedata2_index;
	struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;
	struct rte_pci_resource *pci_rsc =
		&dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
	char rsc_filename[PATH_MAX];
	void *pci_resource_ptr = NULL;
	int fd;

	RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n",
			pci_addr->domain, pci_addr->bus, pci_addr->devid,
			pci_addr->function);

	/* Get index of szedata2 device file and create path to device file */
	ret = get_szedata2_index(dev, &szedata2_index);
	if (ret != 0) {
		RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
		return -ENODEV;
	}
	snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
			szedata2_index);

	RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev);

	/*
	 * Get number of available DMA RX and TX channels, which is maximum
	 * number of queues that can be created and store it in private device
	 * data structure.
	 */
	szedata_temp = szedata_open(internals->sze_dev);
	if (szedata_temp == NULL) {
		RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s",
				internals->sze_dev);
		return -EINVAL;
	}
	internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
			SZE2_DIR_RX);
	internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
			SZE2_DIR_TX);
	szedata_close(szedata_temp);

	RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n",
			internals->max_rx_queues, internals->max_tx_queues);

	/* Set rx, tx burst functions */
	if (data->dev_conf.rxmode.enable_scatter == 1 ||
			data->scattered_rx == 1) {
		dev->rx_pkt_burst = eth_szedata2_rx_scattered;
		data->scattered_rx = 1;
	} else {
		dev->rx_pkt_burst = eth_szedata2_rx;
		data->scattered_rx = 0;
	}
	dev->tx_pkt_burst = eth_szedata2_tx;

	/* Set function callbacks for Ethernet API */
	dev->dev_ops = &ops;

	rte_eth_copy_pci_info(dev, dev->pci_dev);

	/* mmap pci resource0 file to rte_pci_resource structure */
	if (dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr == 0) {
		RTE_LOG(ERR, PMD, "Missing resource%u file\n",
				PCI_RESOURCE_NUMBER);
		return -EINVAL;
	}
	snprintf(rsc_filename, PATH_MAX,
			"%s/" PCI_PRI_FMT "/resource%u", pci_get_sysfs_path(),
			pci_addr->domain, pci_addr->bus,
			pci_addr->devid, pci_addr->function,
			PCI_RESOURCE_NUMBER);
	fd = open(rsc_filename, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename);
		return -EINVAL;
	}

	pci_resource_ptr = mmap(0,
			dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
			PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);

	/* mmap() signals failure with MAP_FAILED, not NULL */
	if (pci_resource_ptr == MAP_FAILED) {
		RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
				rsc_filename, fd);
		return -EINVAL;
	}

	dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr =
		pci_resource_ptr;

	RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
			"virt addr = %llx\n", PCI_RESOURCE_NUMBER,
			(unsigned long long)pci_rsc->phys_addr,
			(unsigned long long)pci_rsc->len,
			(unsigned long long)pci_rsc->addr);

	/* Get link state */
	eth_link_update(dev, 0);

	/* Allocate space for one mac address */
	data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
			RTE_CACHE_LINE_SIZE);
	if (data->mac_addrs == NULL) {
		RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
		munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
			dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
		return -EINVAL;
	}

	ether_addr_copy(&eth_addr, data->mac_addrs);

	/* At initial state COMBO card is in promiscuous mode so disable it */
	eth_promiscuous_disable(dev);

	RTE_LOG(INFO, PMD, "szedata2 device ("
			PCI_PRI_FMT ") successfully initialized\n",
			pci_addr->domain, pci_addr->bus, pci_addr->devid,
			pci_addr->function);

	return 0;
}
static int
rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct rte_pci_addr *pci_addr = &dev->pci_dev->addr;

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;
	munmap(dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
			dev->pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);

	RTE_LOG(INFO, PMD, "szedata2 device ("
			PCI_PRI_FMT ") successfully uninitialized\n",
			pci_addr->domain, pci_addr->bus, pci_addr->devid,
			pci_addr->function);

	return 0;
}
static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO80G)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO100G)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
				PCI_DEVICE_ID_NETCOPE_COMBO100G2)
	},
	{
		.vendor_id = 0,
	}
};
static struct eth_driver szedata2_eth_driver = {
	.pci_drv = {
		.name = RTE_SZEDATA2_PCI_DRIVER_NAME,
		.id_table = rte_szedata2_pci_id_table,
	},
	.eth_dev_init = rte_szedata2_eth_dev_init,
	.eth_dev_uninit = rte_szedata2_eth_dev_uninit,
	.dev_private_size = sizeof(struct pmd_internals),
};

static int
rte_szedata2_init(const char *name __rte_unused,
		const char *args __rte_unused)
{
	rte_eth_driver_register(&szedata2_eth_driver);
	return 0;
}

static int
rte_szedata2_uninit(const char *name __rte_unused)
{
	return 0;
}

static struct rte_driver rte_szedata2_driver = {
	.type = PMD_PDEV,
	.name = RTE_SZEDATA2_DRIVER_NAME,
	.init = rte_szedata2_init,
	.uninit = rte_szedata2_uninit,
};

PMD_REGISTER_DRIVER(rte_szedata2_driver, rte_szedata2_pmd);
DRIVER_REGISTER_PCI_TABLE(rte_szedata2_pmd, rte_szedata2_pci_id_table);