1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015 - 2016 CESNET
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_malloc.h>
21 #include <rte_memcpy.h>
22 #include <rte_kvargs.h>
25 #include "rte_eth_szedata2.h"
26 #include "szedata2_iobuf.h"
28 #define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
29 #define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
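/* Size of the area locked in the TX ring by one szedata_tx_lock_data() call. */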
30 #define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)
33 * size of szedata2_packet header with alignment
35 #define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
37 #define RTE_SZEDATA2_DRIVER_NAME net_szedata2
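/* Path format of the szedata2 device file; %u is the device index found in sysfs. */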
39 #define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
41 struct pmd_internals {
42 struct rte_eth_dev *dev;
43 uint16_t max_rx_queues;
44 uint16_t max_tx_queues;
45 char sze_dev[PATH_MAX];
46 struct rte_mem_resource *pci_rsc;
49 struct szedata2_rx_queue {
50 struct pmd_internals *priv;
54 struct rte_mempool *mb_pool;
55 volatile uint64_t rx_pkts;
56 volatile uint64_t rx_bytes;
57 volatile uint64_t err_pkts;
60 struct szedata2_tx_queue {
61 struct pmd_internals *priv;
64 volatile uint64_t tx_pkts;
65 volatile uint64_t tx_bytes;
66 volatile uint64_t err_pkts;
69 static struct ether_addr eth_addr = {
70 .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
74 eth_szedata2_rx(void *queue,
75 struct rte_mbuf **bufs,
79 struct rte_mbuf *mbuf;
80 struct szedata2_rx_queue *sze_q = queue;
81 struct rte_pktmbuf_pool_private *mbp_priv;
87 uint64_t num_bytes = 0;
88 struct szedata *sze = sze_q->sze;
89 uint8_t *header_ptr = NULL; /* header of packet */
90 uint8_t *packet_ptr1 = NULL;
91 uint8_t *packet_ptr2 = NULL;
92 uint16_t packet_len1 = 0;
93 uint16_t packet_len2 = 0;
94 uint16_t hw_data_align;
96 if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
100	 * Reads the given number of packets from the szedata2 channel given
101	 * by the queue and copies the packet data into a newly allocated mbuf
104 for (i = 0; i < nb_pkts; i++) {
105 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
107 if (unlikely(mbuf == NULL)) {
108 sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
112 /* get the next sze packet */
113 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
114 sze->ct_rx_lck->next == NULL) {
115 /* unlock old data */
116 szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
117 sze->ct_rx_lck_orig = NULL;
118 sze->ct_rx_lck = NULL;
121 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
122 /* nothing to read, lock new data */
123 sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
124 sze->ct_rx_lck_orig = sze->ct_rx_lck;
126 if (sze->ct_rx_lck == NULL) {
127 /* nothing to lock */
128 rte_pktmbuf_free(mbuf);
132 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
133 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
135 if (!sze->ct_rx_rem_bytes) {
136 rte_pktmbuf_free(mbuf);
141 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
144 * copy parts of header to merge buffer
146 if (sze->ct_rx_lck->next == NULL) {
147 rte_pktmbuf_free(mbuf);
151 /* copy first part of header */
152 rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
153 sze->ct_rx_rem_bytes);
155 /* copy second part of header */
156 sze->ct_rx_lck = sze->ct_rx_lck->next;
157 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
158 rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
160 RTE_SZE2_PACKET_HEADER_SIZE -
161 sze->ct_rx_rem_bytes);
163 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
164 sze->ct_rx_rem_bytes;
165 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
166 RTE_SZE2_PACKET_HEADER_SIZE +
167 sze->ct_rx_rem_bytes;
169 header_ptr = (uint8_t *)sze->ct_rx_buffer;
172 header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
173 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
174 sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
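/*
 * The first two header bytes hold the segment size, the next two
 * the hardware data size; both are little-endian.
 */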
177 sg_size = le16toh(*((uint16_t *)header_ptr));
178 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
179 packet_size = sg_size -
180 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
183 /* check whether the packet is all right */
185 errx(5, "Zero segsize");
187 /* check sg_size and hwsize */
188 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
189 errx(10, "Hwsize bigger than expected. Segsize: %d, "
190 "hwsize: %d", sg_size, hw_size);
194 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
195 RTE_SZE2_PACKET_HEADER_SIZE;
197 if (sze->ct_rx_rem_bytes >=
199 RTE_SZE2_PACKET_HEADER_SIZE)) {
201 /* one packet ready - go to another */
202 packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
203 packet_len1 = packet_size;
207 sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
208 RTE_SZE2_PACKET_HEADER_SIZE;
209 sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
210 RTE_SZE2_PACKET_HEADER_SIZE;
213 if (sze->ct_rx_lck->next == NULL) {
214 errx(6, "Need \"next\" lock, "
215 "but it is missing: %u",
216 sze->ct_rx_rem_bytes);
220 if (sze->ct_rx_rem_bytes <= hw_data_align) {
221 uint16_t rem_size = hw_data_align -
222 sze->ct_rx_rem_bytes;
224 /* MOVE to next lock */
225 sze->ct_rx_lck = sze->ct_rx_lck->next;
227 (void *)(((uint8_t *)
228 (sze->ct_rx_lck->start)) + rem_size);
230 packet_ptr1 = sze->ct_rx_cur_ptr;
231 packet_len1 = packet_size;
235 sze->ct_rx_cur_ptr +=
236 RTE_SZE2_ALIGN8(packet_size);
237 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
238 rem_size - RTE_SZE2_ALIGN8(packet_size);
240 /* get pointer and length from first part */
241 packet_ptr1 = sze->ct_rx_cur_ptr +
243 packet_len1 = sze->ct_rx_rem_bytes -
246 /* MOVE to next lock */
247 sze->ct_rx_lck = sze->ct_rx_lck->next;
248 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
250 /* get pointer and length from second part */
251 packet_ptr2 = sze->ct_rx_cur_ptr;
252 packet_len2 = packet_size - packet_len1;
254 sze->ct_rx_cur_ptr +=
255 RTE_SZE2_ALIGN8(packet_size) -
257 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
258 (RTE_SZE2_ALIGN8(packet_size) -
263 if (unlikely(packet_ptr1 == NULL)) {
264 rte_pktmbuf_free(mbuf);
268 /* get the space available for data in the mbuf */
269 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
270 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
271 RTE_PKTMBUF_HEADROOM);
273 if (packet_size <= buf_size) {
274 /* sze packet will fit in one mbuf, go ahead and copy */
275 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
276 packet_ptr1, packet_len1);
277 if (packet_ptr2 != NULL) {
278 rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
279 uint8_t *) + packet_len1),
280 packet_ptr2, packet_len2);
282 mbuf->data_len = (uint16_t)packet_size;
284 mbuf->pkt_len = packet_size;
285 mbuf->port = sze_q->in_port;
288 num_bytes += packet_size;
291 * sze packet will not fit in one mbuf,
292 * scattered mode is not enabled, drop packet
295 "SZE segment %d bytes will not fit in one mbuf "
296 "(%d bytes), scattered mode is not enabled, "
298 packet_size, buf_size);
299 rte_pktmbuf_free(mbuf);
303 sze_q->rx_pkts += num_rx;
304 sze_q->rx_bytes += num_bytes;
309 eth_szedata2_rx_scattered(void *queue,
310 struct rte_mbuf **bufs,
314 struct rte_mbuf *mbuf;
315 struct szedata2_rx_queue *sze_q = queue;
316 struct rte_pktmbuf_pool_private *mbp_priv;
321 uint16_t packet_size;
322 uint64_t num_bytes = 0;
323 struct szedata *sze = sze_q->sze;
324 uint8_t *header_ptr = NULL; /* header of packet */
325 uint8_t *packet_ptr1 = NULL;
326 uint8_t *packet_ptr2 = NULL;
327 uint16_t packet_len1 = 0;
328 uint16_t packet_len2 = 0;
329 uint16_t hw_data_align;
330 uint64_t *mbuf_failed_ptr =
331 &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
333 if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
337	 * Reads the given number of packets from the szedata2 channel given
338	 * by the queue and copies the packet data into a newly allocated mbuf
341 for (i = 0; i < nb_pkts; i++) {
342 const struct szedata_lock *ct_rx_lck_backup;
343 unsigned int ct_rx_rem_bytes_backup;
344 unsigned char *ct_rx_cur_ptr_backup;
346 /* get the next sze packet */
347 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
348 sze->ct_rx_lck->next == NULL) {
349 /* unlock old data */
350 szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
351 sze->ct_rx_lck_orig = NULL;
352 sze->ct_rx_lck = NULL;
356	 * Store items from the sze structure which can be changed
357	 * before the mbuf is allocated. Use these items if the mbuf
358	 * allocation fails.
360 ct_rx_lck_backup = sze->ct_rx_lck;
361 ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
362 ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
364 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
365 /* nothing to read, lock new data */
366 sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
367 sze->ct_rx_lck_orig = sze->ct_rx_lck;
370	 * The backed-up items from the sze structure must be updated
371	 * after locking so that they point to the new locks.
373 ct_rx_lck_backup = sze->ct_rx_lck;
374 ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
375 ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
377 if (sze->ct_rx_lck == NULL)
378 /* nothing to lock */
381 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
382 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
384 if (!sze->ct_rx_rem_bytes)
388 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
390 * cut in header - copy parts of header to merge buffer
392 if (sze->ct_rx_lck->next == NULL)
395 /* copy first part of header */
396 rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
397 sze->ct_rx_rem_bytes);
399 /* copy second part of header */
400 sze->ct_rx_lck = sze->ct_rx_lck->next;
401 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
402 rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
404 RTE_SZE2_PACKET_HEADER_SIZE -
405 sze->ct_rx_rem_bytes);
407 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
408 sze->ct_rx_rem_bytes;
409 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
410 RTE_SZE2_PACKET_HEADER_SIZE +
411 sze->ct_rx_rem_bytes;
413 header_ptr = (uint8_t *)sze->ct_rx_buffer;
416 header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
417 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
418 sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
421 sg_size = le16toh(*((uint16_t *)header_ptr));
422 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
423 packet_size = sg_size -
424 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
427 /* check whether the packet is all right */
429 errx(5, "Zero segsize");
431 /* check sg_size and hwsize */
432 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
433 errx(10, "Hwsize bigger than expected. Segsize: %d, "
434 "hwsize: %d", sg_size, hw_size);
438 RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
439 hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;
441 if (sze->ct_rx_rem_bytes >=
443 RTE_SZE2_PACKET_HEADER_SIZE)) {
445 /* one packet ready - go to another */
446 packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
447 packet_len1 = packet_size;
451 sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
452 RTE_SZE2_PACKET_HEADER_SIZE;
453 sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
454 RTE_SZE2_PACKET_HEADER_SIZE;
457 if (sze->ct_rx_lck->next == NULL) {
458 errx(6, "Need \"next\" lock, but it is "
459 "missing: %u", sze->ct_rx_rem_bytes);
463 if (sze->ct_rx_rem_bytes <= hw_data_align) {
464 uint16_t rem_size = hw_data_align -
465 sze->ct_rx_rem_bytes;
467 /* MOVE to next lock */
468 sze->ct_rx_lck = sze->ct_rx_lck->next;
470 (void *)(((uint8_t *)
471 (sze->ct_rx_lck->start)) + rem_size);
473 packet_ptr1 = sze->ct_rx_cur_ptr;
474 packet_len1 = packet_size;
478 sze->ct_rx_cur_ptr +=
479 RTE_SZE2_ALIGN8(packet_size);
480 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
481 rem_size - RTE_SZE2_ALIGN8(packet_size);
483 /* get pointer and length from first part */
484 packet_ptr1 = sze->ct_rx_cur_ptr +
486 packet_len1 = sze->ct_rx_rem_bytes -
489 /* MOVE to next lock */
490 sze->ct_rx_lck = sze->ct_rx_lck->next;
491 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
493 /* get pointer and length from second part */
494 packet_ptr2 = sze->ct_rx_cur_ptr;
495 packet_len2 = packet_size - packet_len1;
497 sze->ct_rx_cur_ptr +=
498 RTE_SZE2_ALIGN8(packet_size) -
500 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
501 (RTE_SZE2_ALIGN8(packet_size) -
506 if (unlikely(packet_ptr1 == NULL))
509 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
511 if (unlikely(mbuf == NULL)) {
513	 * Restore items from the sze structure to the state after
514	 * unlocking (or possibly locking).
516 sze->ct_rx_lck = ct_rx_lck_backup;
517 sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
518 sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
519 sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
523 /* get the space available for data in the mbuf */
524 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
525 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
526 RTE_PKTMBUF_HEADROOM);
528 if (packet_size <= buf_size) {
529 /* sze packet will fit in one mbuf, go ahead and copy */
530 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
531 packet_ptr1, packet_len1);
532 if (packet_ptr2 != NULL) {
534 (rte_pktmbuf_mtod(mbuf, uint8_t *) +
535 packet_len1), packet_ptr2, packet_len2);
537 mbuf->data_len = (uint16_t)packet_size;
540 * sze packet will not fit in one mbuf,
541 * scatter packet into more mbufs
543 struct rte_mbuf *m = mbuf;
544 uint16_t len = rte_pktmbuf_tailroom(mbuf);
546 /* copy first part of packet */
547 /* fill first mbuf */
548 rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
551 packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
553 while (packet_len1 > 0) {
555 m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
557 if (unlikely(m->next == NULL)) {
558 rte_pktmbuf_free(mbuf);
560	 * Restore items from the sze structure
561	 * to the state after unlocking (or possibly
564 sze->ct_rx_lck = ct_rx_lck_backup;
565 sze->ct_rx_rem_bytes =
566 ct_rx_rem_bytes_backup;
568 ct_rx_cur_ptr_backup;
569 (*mbuf_failed_ptr)++;
575 len = RTE_MIN(rte_pktmbuf_tailroom(m),
577 rte_memcpy(rte_pktmbuf_append(mbuf, len),
582 packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
585 if (packet_ptr2 != NULL) {
586 /* copy the second part of the packet, if it exists */
587 /* fill the rest of currently last mbuf */
588 len = rte_pktmbuf_tailroom(m);
589 rte_memcpy(rte_pktmbuf_append(mbuf, len),
592 packet_ptr2 = ((uint8_t *)packet_ptr2) + len;
594 while (packet_len2 > 0) {
596 m->next = rte_pktmbuf_alloc(
599 if (unlikely(m->next == NULL)) {
600 rte_pktmbuf_free(mbuf);
602	 * Restore items from the sze
603	 * structure to the state after
604	 * unlocking (or possibly
609 sze->ct_rx_rem_bytes =
610 ct_rx_rem_bytes_backup;
612 ct_rx_cur_ptr_backup;
613 (*mbuf_failed_ptr)++;
619 len = RTE_MIN(rte_pktmbuf_tailroom(m),
622 rte_pktmbuf_append(mbuf, len),
627 packet_ptr2 = ((uint8_t *)packet_ptr2) +
632 mbuf->pkt_len = packet_size;
633 mbuf->port = sze_q->in_port;
636 num_bytes += packet_size;
640 sze_q->rx_pkts += num_rx;
641 sze_q->rx_bytes += num_bytes;
646 eth_szedata2_tx(void *queue,
647 struct rte_mbuf **bufs,
650 struct rte_mbuf *mbuf;
651 struct szedata2_tx_queue *sze_q = queue;
653 uint64_t num_bytes = 0;
655 const struct szedata_lock *lck;
661 uint32_t unlock_size;
664 uint16_t pkt_left = nb_pkts;
666 if (sze_q->sze == NULL || nb_pkts == 0)
669 while (pkt_left > 0) {
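/*
 * Lock up to RTE_ETH_SZEDATA2_TX_LOCK_SIZE bytes of the TX ring;
 * the locked space may be split into two areas (lck and lck->next).
 */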
671 lck = szedata_tx_lock_data(sze_q->sze,
672 RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
678 lock_size = lck->len;
679 lock_size2 = lck->next ? lck->next->len : 0;
682 mbuf = bufs[nb_pkts - pkt_left];
684 pkt_len = mbuf->pkt_len;
685 mbuf_segs = mbuf->nb_segs;
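/*
 * Each packet in the TX ring is prefixed by an 8-byte header and
 * padded to an 8-byte boundary.
 */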
687 hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
688 RTE_SZE2_ALIGN8(pkt_len);
690 if (lock_size + lock_size2 < hwpkt_len) {
691 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
695 num_bytes += pkt_len;
697 if (lock_size > hwpkt_len) {
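/* The whole packet fits into the first locked area. */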
702 /* write the packet length into the first 2 bytes of the 8B header */
703 *((uint16_t *)dst) = htole16(
704 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
706 *(((uint16_t *)dst) + 1) = htole16(0);
708 /* copy packet from mbuf */
709 tmp_dst = ((uint8_t *)(dst)) +
710 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
711 if (mbuf_segs == 1) {
713 * non-scattered packet,
714 * transmit from one mbuf
717 rte_pktmbuf_mtod(mbuf, const void *),
720 /* scattered packet, transmit from more mbufs */
721 struct rte_mbuf *m = mbuf;
727 tmp_dst = ((uint8_t *)(tmp_dst)) +
734 dst = ((uint8_t *)dst) + hwpkt_len;
735 unlock_size += hwpkt_len;
736 lock_size -= hwpkt_len;
738 rte_pktmbuf_free(mbuf);
742 szedata_tx_unlock_data(sze_q->sze, lck,
747 } else if (lock_size + lock_size2 >= hwpkt_len) {
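/* The packet may need to be split between the two locked areas. */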
751 /* write the packet length into the first 2 bytes of the 8B header */
753 htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
755 *(((uint16_t *)dst) + 1) = htole16(0);
758	 * If the raw packet (pkt_len) is smaller than lock_size,
759	 * get the correct length for memcpy
762 pkt_len < lock_size -
763 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
765 lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
767 rem_len = hwpkt_len - lock_size;
769 tmp_dst = ((uint8_t *)(dst)) +
770 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
771 if (mbuf_segs == 1) {
773 * non-scattered packet,
774 * transmit from one mbuf
776 /* copy part of packet to first area */
778 rte_pktmbuf_mtod(mbuf, const void *),
782 dst = lck->next->start;
784 /* copy part of packet to second area */
786 (const void *)(rte_pktmbuf_mtod(mbuf,
788 write_len), pkt_len - write_len);
790 /* scattered packet, transmit from more mbufs */
791 struct rte_mbuf *m = mbuf;
792 uint16_t written = 0;
793 uint16_t to_write = 0;
794 bool new_mbuf = true;
795 uint16_t write_off = 0;
797 /* copy part of packet to first area */
798 while (m && written < write_len) {
799 to_write = RTE_MIN(m->data_len,
800 write_len - written);
806 tmp_dst = ((uint8_t *)(tmp_dst)) +
808 if (m->data_len <= write_len -
819 dst = lck->next->start;
823 write_off = new_mbuf ? 0 : to_write;
825 /* copy part of packet to second area */
826 while (m && written < pkt_len - write_len) {
827 rte_memcpy(tmp_dst, (const void *)
829 uint8_t *) + write_off),
830 m->data_len - write_off);
832 tmp_dst = ((uint8_t *)(tmp_dst)) +
833 (m->data_len - write_off);
834 written += m->data_len - write_off;
840 dst = ((uint8_t *)dst) + rem_len;
841 unlock_size += hwpkt_len;
842 lock_size = lock_size2 - rem_len;
845 rte_pktmbuf_free(mbuf);
849 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
853 sze_q->tx_pkts += num_tx;
854 sze_q->err_pkts += nb_pkts - num_tx;
855 sze_q->tx_bytes += num_bytes;
860 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
862 struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
864 struct pmd_internals *internals = (struct pmd_internals *)
865 dev->data->dev_private;
867 if (rxq->sze == NULL) {
868 uint32_t rx = 1 << rxq->rx_channel;
870 rxq->sze = szedata_open(internals->sze_dev);
871 if (rxq->sze == NULL)
873 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
874 if (ret != 0 || rx == 0)
878 ret = szedata_start(rxq->sze);
881 dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
885 szedata_close(rxq->sze);
891 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
893 struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
895 if (rxq->sze != NULL) {
896 szedata_close(rxq->sze);
900 dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
905 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
907 struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
909 struct pmd_internals *internals = (struct pmd_internals *)
910 dev->data->dev_private;
912 if (txq->sze == NULL) {
914 uint32_t tx = 1 << txq->tx_channel;
915 txq->sze = szedata_open(internals->sze_dev);
916 if (txq->sze == NULL)
918 ret = szedata_subscribe3(txq->sze, &rx, &tx);
919 if (ret != 0 || tx == 0)
923 ret = szedata_start(txq->sze);
926 dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
930 szedata_close(txq->sze);
936 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
938 struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
940 if (txq->sze != NULL) {
941 szedata_close(txq->sze);
945 dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
950 eth_dev_start(struct rte_eth_dev *dev)
954 uint16_t nb_rx = dev->data->nb_rx_queues;
955 uint16_t nb_tx = dev->data->nb_tx_queues;
957 for (i = 0; i < nb_rx; i++) {
958 ret = eth_rx_queue_start(dev, i);
963 for (i = 0; i < nb_tx; i++) {
964 ret = eth_tx_queue_start(dev, i);
972 for (i = 0; i < nb_tx; i++)
973 eth_tx_queue_stop(dev, i);
975 for (i = 0; i < nb_rx; i++)
976 eth_rx_queue_stop(dev, i);
981 eth_dev_stop(struct rte_eth_dev *dev)
984 uint16_t nb_rx = dev->data->nb_rx_queues;
985 uint16_t nb_tx = dev->data->nb_tx_queues;
987 for (i = 0; i < nb_tx; i++)
988 eth_tx_queue_stop(dev, i);
990 for (i = 0; i < nb_rx; i++)
991 eth_rx_queue_stop(dev, i);
995 eth_dev_configure(struct rte_eth_dev *dev)
997 struct rte_eth_dev_data *data = dev->data;
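/* Select the RX burst function according to the scatter offload setting. */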
998 if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
999 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1000 data->scattered_rx = 1;
1002 dev->rx_pkt_burst = eth_szedata2_rx;
1003 data->scattered_rx = 0;
1009 eth_dev_info(struct rte_eth_dev *dev,
1010 struct rte_eth_dev_info *dev_info)
1012 struct pmd_internals *internals = dev->data->dev_private;
1014 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1015 dev_info->if_index = 0;
1016 dev_info->max_mac_addrs = 1;
1017 dev_info->max_rx_pktlen = (uint32_t)-1;
1018 dev_info->max_rx_queues = internals->max_rx_queues;
1019 dev_info->max_tx_queues = internals->max_tx_queues;
1020 dev_info->min_rx_bufsize = 0;
1021 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
1022 dev_info->tx_offload_capa = 0;
1023 dev_info->rx_queue_offload_capa = 0;
1024 dev_info->tx_queue_offload_capa = 0;
1025 dev_info->speed_capa = ETH_LINK_SPEED_100G;
1029 eth_stats_get(struct rte_eth_dev *dev,
1030 struct rte_eth_stats *stats)
1033 uint16_t nb_rx = dev->data->nb_rx_queues;
1034 uint16_t nb_tx = dev->data->nb_tx_queues;
1035 uint64_t rx_total = 0;
1036 uint64_t tx_total = 0;
1037 uint64_t tx_err_total = 0;
1038 uint64_t rx_total_bytes = 0;
1039 uint64_t tx_total_bytes = 0;
1041 for (i = 0; i < nb_rx; i++) {
1042 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1044 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1045 stats->q_ipackets[i] = rxq->rx_pkts;
1046 stats->q_ibytes[i] = rxq->rx_bytes;
1048 rx_total += rxq->rx_pkts;
1049 rx_total_bytes += rxq->rx_bytes;
1052 for (i = 0; i < nb_tx; i++) {
1053 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1055 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1056 stats->q_opackets[i] = txq->tx_pkts;
1057 stats->q_obytes[i] = txq->tx_bytes;
1058 stats->q_errors[i] = txq->err_pkts;
1060 tx_total += txq->tx_pkts;
1061 tx_total_bytes += txq->tx_bytes;
1062 tx_err_total += txq->err_pkts;
1065 stats->ipackets = rx_total;
1066 stats->opackets = tx_total;
1067 stats->ibytes = rx_total_bytes;
1068 stats->obytes = tx_total_bytes;
1069 stats->oerrors = tx_err_total;
1070 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
1076 eth_stats_reset(struct rte_eth_dev *dev)
1079 uint16_t nb_rx = dev->data->nb_rx_queues;
1080 uint16_t nb_tx = dev->data->nb_tx_queues;
1082 for (i = 0; i < nb_rx; i++) {
1083 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1088 for (i = 0; i < nb_tx; i++) {
1089 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1097 eth_rx_queue_release(void *q)
1099 struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
1102 if (rxq->sze != NULL)
1103 szedata_close(rxq->sze);
1109 eth_tx_queue_release(void *q)
1111 struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
1114 if (txq->sze != NULL)
1115 szedata_close(txq->sze);
1121 eth_dev_close(struct rte_eth_dev *dev)
1124 uint16_t nb_rx = dev->data->nb_rx_queues;
1125 uint16_t nb_tx = dev->data->nb_tx_queues;
1129 for (i = 0; i < nb_rx; i++) {
1130 eth_rx_queue_release(dev->data->rx_queues[i]);
1131 dev->data->rx_queues[i] = NULL;
1133 dev->data->nb_rx_queues = 0;
1134 for (i = 0; i < nb_tx; i++) {
1135 eth_tx_queue_release(dev->data->tx_queues[i]);
1136 dev->data->tx_queues[i] = NULL;
1138 dev->data->nb_tx_queues = 0;
1142 * The function takes the value from the first IBUF status register.
1143 * The values in the IBUF and OBUF should be the same.
1146 * Pointer to device private structure.
1148 * Link speed constant.
1150 static inline enum szedata2_link_speed
1151 get_link_speed(const struct pmd_internals *internals)
1153 const volatile struct szedata2_ibuf *ibuf =
1154 ibuf_ptr_by_index(internals->pci_rsc, 0);
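/* The link speed is encoded in bits 4-6 of the IBUF status register. */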
1155 uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
1158 return SZEDATA2_LINK_SPEED_10G;
1160 return SZEDATA2_LINK_SPEED_40G;
1162 return SZEDATA2_LINK_SPEED_100G;
1164 return SZEDATA2_LINK_SPEED_DEFAULT;
1169 eth_link_update(struct rte_eth_dev *dev,
1170 int wait_to_complete __rte_unused)
1172 struct rte_eth_link link;
1173 struct pmd_internals *internals = (struct pmd_internals *)
1174 dev->data->dev_private;
1175 const volatile struct szedata2_ibuf *ibuf;
1177 bool link_is_up = false;
1179 memset(&link, 0, sizeof(link));
1181 switch (get_link_speed(internals)) {
1182 case SZEDATA2_LINK_SPEED_10G:
1183 link.link_speed = ETH_SPEED_NUM_10G;
1185 case SZEDATA2_LINK_SPEED_40G:
1186 link.link_speed = ETH_SPEED_NUM_40G;
1188 case SZEDATA2_LINK_SPEED_100G:
1189 link.link_speed = ETH_SPEED_NUM_100G;
1192 link.link_speed = ETH_SPEED_NUM_10G;
1196 /* szedata2 uses only full duplex */
1197 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1199 for (i = 0; i < szedata2_ibuf_count; i++) {
1200 ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
1202 * Link is considered up if at least one ibuf is enabled
1205 if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
1211 link.link_status = link_is_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1213 link.link_autoneg = ETH_LINK_FIXED;
1215 rte_eth_linkstatus_set(dev, &link);
1220 eth_dev_set_link_up(struct rte_eth_dev *dev)
1222 struct pmd_internals *internals = (struct pmd_internals *)
1223 dev->data->dev_private;
1226 for (i = 0; i < szedata2_ibuf_count; i++)
1227 ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
1228 for (i = 0; i < szedata2_obuf_count; i++)
1229 obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
1234 eth_dev_set_link_down(struct rte_eth_dev *dev)
1236 struct pmd_internals *internals = (struct pmd_internals *)
1237 dev->data->dev_private;
1240 for (i = 0; i < szedata2_ibuf_count; i++)
1241 ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
1242 for (i = 0; i < szedata2_obuf_count; i++)
1243 obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
1248 eth_rx_queue_setup(struct rte_eth_dev *dev,
1249 uint16_t rx_queue_id,
1250 uint16_t nb_rx_desc __rte_unused,
1251 unsigned int socket_id,
1252 const struct rte_eth_rxconf *rx_conf __rte_unused,
1253 struct rte_mempool *mb_pool)
1255 struct pmd_internals *internals = dev->data->dev_private;
1256 struct szedata2_rx_queue *rxq;
1258 uint32_t rx = 1 << rx_queue_id;
1261 if (dev->data->rx_queues[rx_queue_id] != NULL) {
1262 eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
1263 dev->data->rx_queues[rx_queue_id] = NULL;
1266 rxq = rte_zmalloc_socket("szedata2 rx queue",
1267 sizeof(struct szedata2_rx_queue),
1268 RTE_CACHE_LINE_SIZE, socket_id);
1270 RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for rx queue id "
1271 "%" PRIu16 "!\n", rx_queue_id);
1275 rxq->priv = internals;
1276 rxq->sze = szedata_open(internals->sze_dev);
1277 if (rxq->sze == NULL) {
1278 RTE_LOG(ERR, PMD, "szedata_open() failed for rx queue id "
1279 "%" PRIu16 "!\n", rx_queue_id);
1280 eth_rx_queue_release(rxq);
1283 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
1284 if (ret != 0 || rx == 0) {
1285 RTE_LOG(ERR, PMD, "szedata_subscribe3() failed for rx queue id "
1286 "%" PRIu16 "!\n", rx_queue_id);
1287 eth_rx_queue_release(rxq);
1290 rxq->rx_channel = rx_queue_id;
1291 rxq->in_port = dev->data->port_id;
1292 rxq->mb_pool = mb_pool;
1297 dev->data->rx_queues[rx_queue_id] = rxq;
1299 RTE_LOG(DEBUG, PMD, "Configured rx queue id %" PRIu16 " on socket "
1300 "%u.\n", rx_queue_id, socket_id);
1306 eth_tx_queue_setup(struct rte_eth_dev *dev,
1307 uint16_t tx_queue_id,
1308 uint16_t nb_tx_desc __rte_unused,
1309 unsigned int socket_id,
1310 const struct rte_eth_txconf *tx_conf __rte_unused)
1312 struct pmd_internals *internals = dev->data->dev_private;
1313 struct szedata2_tx_queue *txq;
1316 uint32_t tx = 1 << tx_queue_id;
1318 if (dev->data->tx_queues[tx_queue_id] != NULL) {
1319 eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
1320 dev->data->tx_queues[tx_queue_id] = NULL;
1323 txq = rte_zmalloc_socket("szedata2 tx queue",
1324 sizeof(struct szedata2_tx_queue),
1325 RTE_CACHE_LINE_SIZE, socket_id);
1327 RTE_LOG(ERR, PMD, "rte_zmalloc_socket() failed for tx queue id "
1328 "%" PRIu16 "!\n", tx_queue_id);
1332 txq->priv = internals;
1333 txq->sze = szedata_open(internals->sze_dev);
1334 if (txq->sze == NULL) {
1335 RTE_LOG(ERR, PMD, "szedata_open() failed for tx queue id "
1336 "%" PRIu16 "!\n", tx_queue_id);
1337 eth_tx_queue_release(txq);
1340 ret = szedata_subscribe3(txq->sze, &rx, &tx);
1341 if (ret != 0 || tx == 0) {
1342 RTE_LOG(ERR, PMD, "szedata_subscribe3() failed for tx queue id "
1343 "%" PRIu16 "!\n", tx_queue_id);
1344 eth_tx_queue_release(txq);
1347 txq->tx_channel = tx_queue_id;
1352 dev->data->tx_queues[tx_queue_id] = txq;
1354 RTE_LOG(DEBUG, PMD, "Configured tx queue id %" PRIu16 " on socket "
1355 "%u.\n", tx_queue_id, socket_id);
1361 eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
1362 struct ether_addr *mac_addr __rte_unused)
1367 eth_promiscuous_enable(struct rte_eth_dev *dev)
1369 struct pmd_internals *internals = (struct pmd_internals *)
1370 dev->data->dev_private;
1373 for (i = 0; i < szedata2_ibuf_count; i++) {
1374 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1375 SZEDATA2_MAC_CHMODE_PROMISC);
1380 eth_promiscuous_disable(struct rte_eth_dev *dev)
1382 struct pmd_internals *internals = (struct pmd_internals *)
1383 dev->data->dev_private;
1386 for (i = 0; i < szedata2_ibuf_count; i++) {
1387 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1388 SZEDATA2_MAC_CHMODE_ONLY_VALID);
1393 eth_allmulticast_enable(struct rte_eth_dev *dev)
1395 struct pmd_internals *internals = (struct pmd_internals *)
1396 dev->data->dev_private;
1399 for (i = 0; i < szedata2_ibuf_count; i++) {
1400 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1401 SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
1406 eth_allmulticast_disable(struct rte_eth_dev *dev)
1408 struct pmd_internals *internals = (struct pmd_internals *)
1409 dev->data->dev_private;
1412 for (i = 0; i < szedata2_ibuf_count; i++) {
1413 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1414 SZEDATA2_MAC_CHMODE_ONLY_VALID);
1418 static const struct eth_dev_ops ops = {
1419 .dev_start = eth_dev_start,
1420 .dev_stop = eth_dev_stop,
1421 .dev_set_link_up = eth_dev_set_link_up,
1422 .dev_set_link_down = eth_dev_set_link_down,
1423 .dev_close = eth_dev_close,
1424 .dev_configure = eth_dev_configure,
1425 .dev_infos_get = eth_dev_info,
1426 .promiscuous_enable = eth_promiscuous_enable,
1427 .promiscuous_disable = eth_promiscuous_disable,
1428 .allmulticast_enable = eth_allmulticast_enable,
1429 .allmulticast_disable = eth_allmulticast_disable,
1430 .rx_queue_start = eth_rx_queue_start,
1431 .rx_queue_stop = eth_rx_queue_stop,
1432 .tx_queue_start = eth_tx_queue_start,
1433 .tx_queue_stop = eth_tx_queue_stop,
1434 .rx_queue_setup = eth_rx_queue_setup,
1435 .tx_queue_setup = eth_tx_queue_setup,
1436 .rx_queue_release = eth_rx_queue_release,
1437 .tx_queue_release = eth_tx_queue_release,
1438 .link_update = eth_link_update,
1439 .stats_get = eth_stats_get,
1440 .stats_reset = eth_stats_reset,
1441 .mac_addr_set = eth_mac_addr_set,
1445 * This function goes through sysfs and looks for the index of the szedata2
1446 * device file (/dev/szedataIIX, where X is the index).
1453 get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
1456 struct dirent *entry;
1460 char pcislot_path[PATH_MAX];
1466 dir = opendir("/sys/class/combo");
1471	 * Iterate through all combosixX directories.
1472	 * When the value in the /sys/class/combo/combosixX/device/pcislot
1473	 * file matches the PCI location of the Ethernet device, "X" is the
1474	 * index of the device.
1476 while ((entry = readdir(dir)) != NULL) {
1477 ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
1481 snprintf(pcislot_path, PATH_MAX,
1482 "/sys/class/combo/combosix%u/device/pcislot",
1485 fd = fopen(pcislot_path, "r");
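/* The pcislot file contains the PCI address as domain:bus:devid.function. */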
1489 ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
1490 &domain, &bus, &devid, &function);
1495 if (pcislot_addr->domain == domain &&
1496 pcislot_addr->bus == bus &&
1497 pcislot_addr->devid == devid &&
1498 pcislot_addr->function == function) {
1510 rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
1512 struct rte_eth_dev_data *data = dev->data;
1513 struct pmd_internals *internals = (struct pmd_internals *)
1515 struct szedata *szedata_temp;
1517 uint32_t szedata2_index;
1518 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1519 struct rte_pci_addr *pci_addr = &pci_dev->addr;
1520 struct rte_mem_resource *pci_rsc =
1521 &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
1522 char rsc_filename[PATH_MAX];
1523 void *pci_resource_ptr = NULL;
1526 RTE_LOG(INFO, PMD, "Initializing szedata2 device (" PCI_PRI_FMT ")\n",
1527 pci_addr->domain, pci_addr->bus, pci_addr->devid,
1528 pci_addr->function);
1530 internals->dev = dev;
1532 /* Get index of szedata2 device file and create path to device file */
1533 ret = get_szedata2_index(pci_addr, &szedata2_index);
1535 RTE_LOG(ERR, PMD, "Failed to get szedata2 device index!\n");
1538 snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
1541 RTE_LOG(INFO, PMD, "SZEDATA2 path: %s\n", internals->sze_dev);
1544	 * Get the number of available DMA RX and TX channels, which is the maximum
1545	 * number of queues that can be created, and store it in the private device
1548 szedata_temp = szedata_open(internals->sze_dev);
1549 if (szedata_temp == NULL) {
1550 RTE_LOG(ERR, PMD, "szedata_open(): failed to open %s",
1551 internals->sze_dev);
1554 internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
1556 internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
1558 szedata_close(szedata_temp);
1560 RTE_LOG(INFO, PMD, "Available DMA channels RX: %u TX: %u\n",
1561 internals->max_rx_queues, internals->max_tx_queues);
1563 /* Set rx, tx burst functions */
1564 if (data->scattered_rx == 1)
1565 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1567 dev->rx_pkt_burst = eth_szedata2_rx;
1568 dev->tx_pkt_burst = eth_szedata2_tx;
1570 /* Set function callbacks for Ethernet API */
1571 dev->dev_ops = &ops;
1573 rte_eth_copy_pci_info(dev, pci_dev);
1575 /* mmap pci resource0 file to rte_mem_resource structure */
1576 if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
1578 RTE_LOG(ERR, PMD, "Missing resource%u file\n",
1579 PCI_RESOURCE_NUMBER);
1582 snprintf(rsc_filename, PATH_MAX,
1583 "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
1584 pci_addr->domain, pci_addr->bus,
1585 pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
1586 fd = open(rsc_filename, O_RDWR);
1588 RTE_LOG(ERR, PMD, "Could not open file %s\n", rsc_filename);
1592 pci_resource_ptr = mmap(0,
1593 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
1594 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1596 if (pci_resource_ptr == MAP_FAILED) {
1597 RTE_LOG(ERR, PMD, "Could not mmap file %s (fd = %d)\n",
1601 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
1602 internals->pci_rsc = pci_rsc;
1604 RTE_LOG(DEBUG, PMD, "resource%u phys_addr = 0x%llx len = %llu "
1605 "virt addr = %llx\n", PCI_RESOURCE_NUMBER,
1606 (unsigned long long)pci_rsc->phys_addr,
1607 (unsigned long long)pci_rsc->len,
1608 (unsigned long long)pci_rsc->addr);
1610 /* Get link state */
1611 eth_link_update(dev, 0);
1613 /* Allocate space for one MAC address */
1614 data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
1615 RTE_CACHE_LINE_SIZE);
1616 if (data->mac_addrs == NULL) {
1617 RTE_LOG(ERR, PMD, "Could not alloc space for MAC address!\n");
1618 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1619 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1623 ether_addr_copy(&eth_addr, data->mac_addrs);
1625 /* At the initial state the COMBO card is in promiscuous mode, so disable it */
1626 eth_promiscuous_disable(dev);
1628 RTE_LOG(INFO, PMD, "szedata2 device ("
1629 PCI_PRI_FMT ") successfully initialized\n",
1630 pci_addr->domain, pci_addr->bus, pci_addr->devid,
1631 pci_addr->function);
1637 rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
1639 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1640 struct rte_pci_addr *pci_addr = &pci_dev->addr;
1642 rte_free(dev->data->mac_addrs);
1643 dev->data->mac_addrs = NULL;
1644 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1645 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1647 RTE_LOG(INFO, PMD, "szedata2 device ("
1648 PCI_PRI_FMT ") successfully uninitialized\n",
1649 pci_addr->domain, pci_addr->bus, pci_addr->devid,
1650 pci_addr->function);
1655 static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
1657 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1658 PCI_DEVICE_ID_NETCOPE_COMBO80G)
1661 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1662 PCI_DEVICE_ID_NETCOPE_COMBO100G)
1665 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1666 PCI_DEVICE_ID_NETCOPE_COMBO100G2)
1673 static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1674 struct rte_pci_device *pci_dev)
1676 return rte_eth_dev_pci_generic_probe(pci_dev,
1677 sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
1680 static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
1682 return rte_eth_dev_pci_generic_remove(pci_dev,
1683 rte_szedata2_eth_dev_uninit);
1686 static struct rte_pci_driver szedata2_eth_driver = {
1687 .id_table = rte_szedata2_pci_id_table,
1688 .probe = szedata2_eth_pci_probe,
1689 .remove = szedata2_eth_pci_remove,
1692 RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
1693 RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
1694 RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
1695 "* combo6core & combov3 & szedata2 & szedata2_cv3");