1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015 - 2016 CESNET
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_malloc.h>
21 #include <rte_memcpy.h>
22 #include <rte_kvargs.h>
25 #include "rte_eth_szedata2.h"
26 #include "szedata2_logs.h"
27 #include "szedata2_iobuf.h"
/* Upper bounds on the number of DMA channels exposed as ethdev queues. */
29 #define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
30 #define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
/* Number of bytes requested from szedata_tx_lock_data() per TX burst lock. */
31 #define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)
34 * size of szedata2_packet header with alignment
36 #define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
38 #define RTE_SZEDATA2_DRIVER_NAME net_szedata2
/* printf-style pattern for the szedata2 character device node path. */
40 #define SZEDATA2_DEV_PATH_FMT "/dev/szedataII%u"
/*
 * Per-device private data, stored in dev->data->dev_private.
 * NOTE(review): closing brace of this struct is not visible in this chunk.
 */
42 struct pmd_internals {
43 struct rte_eth_dev *dev;
/* Number of available DMA RX/TX channels (queried at init). */
44 uint16_t max_rx_queues;
45 uint16_t max_tx_queues;
/* Path to the /dev/szedataIIX device node for this adapter. */
46 char sze_dev[PATH_MAX];
/* PCI memory resource used for IBUF/OBUF register access. */
47 struct rte_mem_resource *pci_rsc;
/*
 * Per-RX-queue state. Each queue owns its own szedata handle (opened in
 * queue setup/start; field declarations for it are outside this chunk).
 */
50 struct szedata2_rx_queue {
/* Back-pointer to the owning device's private data. */
51 struct pmd_internals *priv;
/* Mempool used to allocate mbufs for received packets. */
55 struct rte_mempool *mb_pool;
/* Software statistics, updated by the RX burst functions. */
56 volatile uint64_t rx_pkts;
57 volatile uint64_t rx_bytes;
58 volatile uint64_t err_pkts;
/*
 * Per-TX-queue state; statistics are updated by eth_szedata2_tx().
 */
61 struct szedata2_tx_queue {
/* Back-pointer to the owning device's private data. */
62 struct pmd_internals *priv;
/* Software statistics, updated by the TX burst function. */
65 volatile uint64_t tx_pkts;
66 volatile uint64_t tx_bytes;
67 volatile uint64_t err_pkts;
/* Dynamic log types registered for init- and datapath-level logging. */
70 int szedata2_logtype_init;
71 int szedata2_logtype_driver;
/* Single fixed MAC address reported for every szedata2 port. */
73 static struct ether_addr eth_addr = {
74 .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
/*
 * Non-scattered RX burst: reads up to nb_pkts packets from the szedata2
 * channel bound to @queue and copies each packet into one freshly
 * allocated mbuf. Packets larger than the mbuf data room are dropped
 * (scattered mode is handled by eth_szedata2_rx_scattered()).
 * Updates rx_pkts/rx_bytes counters on the queue.
 */
78 eth_szedata2_rx(void *queue,
79 struct rte_mbuf **bufs,
83 struct rte_mbuf *mbuf;
84 struct szedata2_rx_queue *sze_q = queue;
85 struct rte_pktmbuf_pool_private *mbp_priv;
91 uint64_t num_bytes = 0;
92 struct szedata *sze = sze_q->sze;
93 uint8_t *header_ptr = NULL; /* header of packet */
94 uint8_t *packet_ptr1 = NULL;
95 uint8_t *packet_ptr2 = NULL;
96 uint16_t packet_len1 = 0;
97 uint16_t packet_len2 = 0;
98 uint16_t hw_data_align;
/* Nothing to do if the channel is not open or no packets requested. */
100 if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
104 * Reads the given number of packets from szedata2 channel given
105 * by queue and copies the packet data into a newly allocated mbuf
108 for (i = 0; i < nb_pkts; i++) {
109 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
111 if (unlikely(mbuf == NULL)) {
/* Account allocation failure in the device-level counter. */
112 sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
116 /* get the next sze packet */
117 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
118 sze->ct_rx_lck->next == NULL) {
119 /* unlock old data */
120 szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
121 sze->ct_rx_lck_orig = NULL;
122 sze->ct_rx_lck = NULL;
125 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
126 /* nothing to read, lock new data */
127 sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
128 sze->ct_rx_lck_orig = sze->ct_rx_lck;
130 if (sze->ct_rx_lck == NULL) {
131 /* nothing to lock */
132 rte_pktmbuf_free(mbuf);
136 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
137 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
139 if (!sze->ct_rx_rem_bytes) {
140 rte_pktmbuf_free(mbuf);
/*
 * Header straddles two locked areas: reassemble it in the
 * per-channel merge buffer ct_rx_buffer before parsing.
 */
145 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
148 * copy parts of header to merge buffer
150 if (sze->ct_rx_lck->next == NULL) {
151 rte_pktmbuf_free(mbuf);
155 /* copy first part of header */
156 rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
157 sze->ct_rx_rem_bytes);
159 /* copy second part of header */
160 sze->ct_rx_lck = sze->ct_rx_lck->next;
161 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
162 rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
164 RTE_SZE2_PACKET_HEADER_SIZE -
165 sze->ct_rx_rem_bytes;
167 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
168 sze->ct_rx_rem_bytes;
169 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
170 RTE_SZE2_PACKET_HEADER_SIZE +
171 sze->ct_rx_rem_bytes;
173 header_ptr = (uint8_t *)sze->ct_rx_buffer;
/* Header fully inside current area: parse in place. */
176 header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
177 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
178 sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
/*
 * Header layout (little endian): first u16 = segment size,
 * second u16 = hardware-specific data size.
 */
181 sg_size = le16toh(*((uint16_t *)header_ptr));
182 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
183 packet_size = sg_size -
184 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
187 /* checks if packet all right */
189 errx(5, "Zero segsize");
191 /* check sg_size and hwsize */
192 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
193 errx(10, "Hwsize bigger than expected. Segsize: %d, "
194 "hwsize: %d", sg_size, hw_size);
/* Offset from current pointer to the start of packet payload. */
198 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
199 RTE_SZE2_PACKET_HEADER_SIZE;
201 if (sze->ct_rx_rem_bytes >=
203 RTE_SZE2_PACKET_HEADER_SIZE)) {
205 /* one packet ready - go to another */
206 packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
207 packet_len1 = packet_size;
211 sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
212 RTE_SZE2_PACKET_HEADER_SIZE;
213 sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
214 RTE_SZE2_PACKET_HEADER_SIZE;
/* Packet continues in the next locked area. */
217 if (sze->ct_rx_lck->next == NULL) {
218 errx(6, "Need \"next\" lock, "
219 "but it is missing: %u",
220 sze->ct_rx_rem_bytes);
/* Even the payload start is in the next area. */
224 if (sze->ct_rx_rem_bytes <= hw_data_align) {
225 uint16_t rem_size = hw_data_align -
226 sze->ct_rx_rem_bytes;
228 /* MOVE to next lock */
229 sze->ct_rx_lck = sze->ct_rx_lck->next;
231 (void *)(((uint8_t *)
232 (sze->ct_rx_lck->start)) + rem_size);
234 packet_ptr1 = sze->ct_rx_cur_ptr;
235 packet_len1 = packet_size;
239 sze->ct_rx_cur_ptr +=
240 RTE_SZE2_ALIGN8(packet_size);
241 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
242 rem_size - RTE_SZE2_ALIGN8(packet_size);
244 /* get pointer and length from first part */
245 packet_ptr1 = sze->ct_rx_cur_ptr +
247 packet_len1 = sze->ct_rx_rem_bytes -
250 /* MOVE to next lock */
251 sze->ct_rx_lck = sze->ct_rx_lck->next;
252 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
254 /* get pointer and length from second part */
255 packet_ptr2 = sze->ct_rx_cur_ptr;
256 packet_len2 = packet_size - packet_len1;
258 sze->ct_rx_cur_ptr +=
259 RTE_SZE2_ALIGN8(packet_size) -
261 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
262 (RTE_SZE2_ALIGN8(packet_size) -
267 if (unlikely(packet_ptr1 == NULL)) {
268 rte_pktmbuf_free(mbuf);
272 /* get the space available for data in the mbuf */
273 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
274 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
275 RTE_PKTMBUF_HEADROOM);
277 if (packet_size <= buf_size) {
278 /* sze packet will fit in one mbuf, go ahead and copy */
279 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
280 packet_ptr1, packet_len1);
281 if (packet_ptr2 != NULL) {
282 rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
283 uint8_t *) + packet_len1),
284 packet_ptr2, packet_len2);
286 mbuf->data_len = (uint16_t)packet_size;
288 mbuf->pkt_len = packet_size;
289 mbuf->port = sze_q->in_port;
292 num_bytes += packet_size;
295 * sze packet will not fit in one mbuf,
296 * scattered mode is not enabled, drop packet
299 "SZE segment %d bytes will not fit in one mbuf "
300 "(%d bytes), scattered mode is not enabled, "
302 packet_size, buf_size);
303 rte_pktmbuf_free(mbuf);
/* Publish statistics for this burst. */
307 sze_q->rx_pkts += num_rx;
308 sze_q->rx_bytes += num_bytes;
/*
 * Scattered RX burst: same szedata2 channel parsing as eth_szedata2_rx(),
 * but a packet larger than one mbuf's data room is spread over a chain of
 * mbufs instead of being dropped. Because mbuf allocation can fail after
 * the channel pointers were already advanced, the function snapshots
 * (lock, remaining bytes, current pointer) before each packet and restores
 * them on allocation failure so the packet is not lost.
 */
313 eth_szedata2_rx_scattered(void *queue,
314 struct rte_mbuf **bufs,
318 struct rte_mbuf *mbuf;
319 struct szedata2_rx_queue *sze_q = queue;
320 struct rte_pktmbuf_pool_private *mbp_priv;
325 uint16_t packet_size;
326 uint64_t num_bytes = 0;
327 struct szedata *sze = sze_q->sze;
328 uint8_t *header_ptr = NULL; /* header of packet */
329 uint8_t *packet_ptr1 = NULL;
330 uint8_t *packet_ptr2 = NULL;
331 uint16_t packet_len1 = 0;
332 uint16_t packet_len2 = 0;
333 uint16_t hw_data_align;
/* Device-level counter bumped on every mbuf allocation failure. */
334 uint64_t *mbuf_failed_ptr =
335 &sze_q->priv->dev->data->rx_mbuf_alloc_failed;
337 if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
341 * Reads the given number of packets from szedata2 channel given
342 * by queue and copies the packet data into a newly allocated mbuf
345 for (i = 0; i < nb_pkts; i++) {
346 const struct szedata_lock *ct_rx_lck_backup;
347 unsigned int ct_rx_rem_bytes_backup;
348 unsigned char *ct_rx_cur_ptr_backup;
350 /* get the next sze packet */
351 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
352 sze->ct_rx_lck->next == NULL) {
353 /* unlock old data */
354 szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
355 sze->ct_rx_lck_orig = NULL;
356 sze->ct_rx_lck = NULL;
360 * Store items from sze structure which can be changed
361 * before mbuf allocating. Use these items in case of mbuf
362 * allocating failure.
364 ct_rx_lck_backup = sze->ct_rx_lck;
365 ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
366 ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
368 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
369 /* nothing to read, lock new data */
370 sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
371 sze->ct_rx_lck_orig = sze->ct_rx_lck;
374 * Backup items from sze structure must be updated
375 * after locking to contain pointers to new locks.
377 ct_rx_lck_backup = sze->ct_rx_lck;
378 ct_rx_rem_bytes_backup = sze->ct_rx_rem_bytes;
379 ct_rx_cur_ptr_backup = sze->ct_rx_cur_ptr;
381 if (sze->ct_rx_lck == NULL)
382 /* nothing to lock */
385 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
386 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
388 if (!sze->ct_rx_rem_bytes)
/* Header split across locked areas: merge into ct_rx_buffer. */
392 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
394 * cut in header - copy parts of header to merge buffer
396 if (sze->ct_rx_lck->next == NULL)
399 /* copy first part of header */
400 rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
401 sze->ct_rx_rem_bytes);
403 /* copy second part of header */
404 sze->ct_rx_lck = sze->ct_rx_lck->next;
405 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
406 rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
408 RTE_SZE2_PACKET_HEADER_SIZE -
409 sze->ct_rx_rem_bytes);
411 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
412 sze->ct_rx_rem_bytes;
413 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
414 RTE_SZE2_PACKET_HEADER_SIZE +
415 sze->ct_rx_rem_bytes;
417 header_ptr = (uint8_t *)sze->ct_rx_buffer;
420 header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
421 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
422 sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
/* Little-endian header: u16 segment size, u16 HW data size. */
425 sg_size = le16toh(*((uint16_t *)header_ptr));
426 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
427 packet_size = sg_size -
428 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
431 /* checks if packet all right */
433 errx(5, "Zero segsize");
435 /* check sg_size and hwsize */
436 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
437 errx(10, "Hwsize bigger than expected. Segsize: %d, "
438 "hwsize: %d", sg_size, hw_size);
442 RTE_SZE2_ALIGN8((RTE_SZE2_PACKET_HEADER_SIZE +
443 hw_size)) - RTE_SZE2_PACKET_HEADER_SIZE;
445 if (sze->ct_rx_rem_bytes >=
447 RTE_SZE2_PACKET_HEADER_SIZE)) {
449 /* one packet ready - go to another */
450 packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
451 packet_len1 = packet_size;
455 sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
456 RTE_SZE2_PACKET_HEADER_SIZE;
457 sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
458 RTE_SZE2_PACKET_HEADER_SIZE;
/* Packet continues in the next locked area. */
461 if (sze->ct_rx_lck->next == NULL) {
462 errx(6, "Need \"next\" lock, but it is "
463 "missing: %u", sze->ct_rx_rem_bytes);
467 if (sze->ct_rx_rem_bytes <= hw_data_align) {
468 uint16_t rem_size = hw_data_align -
469 sze->ct_rx_rem_bytes;
471 /* MOVE to next lock */
472 sze->ct_rx_lck = sze->ct_rx_lck->next;
474 (void *)(((uint8_t *)
475 (sze->ct_rx_lck->start)) + rem_size);
477 packet_ptr1 = sze->ct_rx_cur_ptr;
478 packet_len1 = packet_size;
482 sze->ct_rx_cur_ptr +=
483 RTE_SZE2_ALIGN8(packet_size);
484 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
485 rem_size - RTE_SZE2_ALIGN8(packet_size);
487 /* get pointer and length from first part */
488 packet_ptr1 = sze->ct_rx_cur_ptr +
490 packet_len1 = sze->ct_rx_rem_bytes -
493 /* MOVE to next lock */
494 sze->ct_rx_lck = sze->ct_rx_lck->next;
495 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
497 /* get pointer and length from second part */
498 packet_ptr2 = sze->ct_rx_cur_ptr;
499 packet_len2 = packet_size - packet_len1;
501 sze->ct_rx_cur_ptr +=
502 RTE_SZE2_ALIGN8(packet_size) -
504 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
505 (RTE_SZE2_ALIGN8(packet_size) -
510 if (unlikely(packet_ptr1 == NULL))
/* Allocate only after the packet is located, so failure is recoverable. */
513 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
515 if (unlikely(mbuf == NULL)) {
517 * Restore items from sze structure to state after
518 * unlocking (eventually locking).
520 sze->ct_rx_lck = ct_rx_lck_backup;
521 sze->ct_rx_rem_bytes = ct_rx_rem_bytes_backup;
522 sze->ct_rx_cur_ptr = ct_rx_cur_ptr_backup;
523 sze_q->priv->dev->data->rx_mbuf_alloc_failed++;
527 /* get the space available for data in the mbuf */
528 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
529 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
530 RTE_PKTMBUF_HEADROOM);
532 if (packet_size <= buf_size) {
533 /* sze packet will fit in one mbuf, go ahead and copy */
534 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
535 packet_ptr1, packet_len1);
536 if (packet_ptr2 != NULL) {
538 (rte_pktmbuf_mtod(mbuf, uint8_t *) +
539 packet_len1), packet_ptr2, packet_len2);
541 mbuf->data_len = (uint16_t)packet_size;
544 * sze packet will not fit in one mbuf,
545 * scatter packet into more mbufs
547 struct rte_mbuf *m = mbuf;
548 uint16_t len = rte_pktmbuf_tailroom(mbuf);
550 /* copy first part of packet */
551 /* fill first mbuf */
552 rte_memcpy(rte_pktmbuf_append(mbuf, len), packet_ptr1,
555 packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
557 while (packet_len1 > 0) {
/* Extend the chain one mbuf at a time until part 1 is consumed. */
559 m->next = rte_pktmbuf_alloc(sze_q->mb_pool);
561 if (unlikely(m->next == NULL)) {
562 rte_pktmbuf_free(mbuf);
564 * Restore items from sze structure
565 * to state after unlocking (eventually
568 sze->ct_rx_lck = ct_rx_lck_backup;
569 sze->ct_rx_rem_bytes =
570 ct_rx_rem_bytes_backup;
572 ct_rx_cur_ptr_backup;
573 (*mbuf_failed_ptr)++;
579 len = RTE_MIN(rte_pktmbuf_tailroom(m),
581 rte_memcpy(rte_pktmbuf_append(mbuf, len),
586 packet_ptr1 = ((uint8_t *)packet_ptr1) + len;
589 if (packet_ptr2 != NULL) {
590 /* copy second part of packet, if exists */
591 /* fill the rest of currently last mbuf */
592 len = rte_pktmbuf_tailroom(m);
593 rte_memcpy(rte_pktmbuf_append(mbuf, len),
596 packet_ptr2 = ((uint8_t *)packet_ptr2) + len;
598 while (packet_len2 > 0) {
600 m->next = rte_pktmbuf_alloc(
603 if (unlikely(m->next == NULL)) {
604 rte_pktmbuf_free(mbuf);
606 * Restore items from sze
607 * structure to state after
608 * unlocking (eventually
613 sze->ct_rx_rem_bytes =
614 ct_rx_rem_bytes_backup;
616 ct_rx_cur_ptr_backup;
617 (*mbuf_failed_ptr)++;
623 len = RTE_MIN(rte_pktmbuf_tailroom(m),
626 rte_pktmbuf_append(mbuf, len),
631 packet_ptr2 = ((uint8_t *)packet_ptr2) +
636 mbuf->pkt_len = packet_size;
637 mbuf->port = sze_q->in_port;
640 num_bytes += packet_size;
/* Publish statistics for this burst. */
644 sze_q->rx_pkts += num_rx;
645 sze_q->rx_bytes += num_bytes;
/*
 * TX burst: locks a region of the szedata2 TX channel, prepends each mbuf's
 * data with an 8-byte aligned szedata2 header (length fields, little
 * endian), copies the packet (possibly split across the two locked areas
 * and/or gathered from an mbuf chain), then unlocks the written bytes.
 * Packets that do not fit in the remaining locked space are retried after
 * unlocking; counters tx_pkts/tx_bytes/err_pkts are updated at the end.
 */
650 eth_szedata2_tx(void *queue,
651 struct rte_mbuf **bufs,
654 struct rte_mbuf *mbuf;
655 struct szedata2_tx_queue *sze_q = queue;
657 uint64_t num_bytes = 0;
659 const struct szedata_lock *lck;
665 uint32_t unlock_size;
668 uint16_t pkt_left = nb_pkts;
670 if (sze_q->sze == NULL || nb_pkts == 0)
673 while (pkt_left > 0) {
/* Lock a chunk of TX channel space; may come back as two areas. */
675 lck = szedata_tx_lock_data(sze_q->sze,
676 RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
682 lock_size = lck->len;
683 lock_size2 = lck->next ? lck->next->len : 0;
686 mbuf = bufs[nb_pkts - pkt_left];
688 pkt_len = mbuf->pkt_len;
689 mbuf_segs = mbuf->nb_segs;
/* Total on-wire size: aligned header + 8B-aligned payload. */
691 hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
692 RTE_SZE2_ALIGN8(pkt_len);
694 if (lock_size + lock_size2 < hwpkt_len) {
/* Not enough locked space for this packet: flush and relock. */
695 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
699 num_bytes += pkt_len;
701 if (lock_size > hwpkt_len) {
/* Whole packet fits in the first locked area. */
706 /* write packet length at first 2 bytes in 8B header */
707 *((uint16_t *)dst) = htole16(
708 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
710 *(((uint16_t *)dst) + 1) = htole16(0);
712 /* copy packet from mbuf */
713 tmp_dst = ((uint8_t *)(dst)) +
714 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
715 if (mbuf_segs == 1) {
717 * non-scattered packet,
718 * transmit from one mbuf
721 rte_pktmbuf_mtod(mbuf, const void *),
724 /* scattered packet, transmit from more mbufs */
725 struct rte_mbuf *m = mbuf;
731 tmp_dst = ((uint8_t *)(tmp_dst)) +
738 dst = ((uint8_t *)dst) + hwpkt_len;
739 unlock_size += hwpkt_len;
740 lock_size -= hwpkt_len;
742 rte_pktmbuf_free(mbuf);
/* Periodically flush written data to the channel. */
746 szedata_tx_unlock_data(sze_q->sze, lck,
751 } else if (lock_size + lock_size2 >= hwpkt_len) {
/* Packet straddles the two locked areas. */
755 /* write packet length at first 2 bytes in 8B header */
757 htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
759 *(((uint16_t *)dst) + 1) = htole16(0);
762 * If the raw packet (pkt_len) is smaller than lock_size
763 * get the correct length for memcpy
766 pkt_len < lock_size -
767 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
769 lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
771 rem_len = hwpkt_len - lock_size;
773 tmp_dst = ((uint8_t *)(dst)) +
774 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
775 if (mbuf_segs == 1) {
777 * non-scattered packet,
778 * transmit from one mbuf
780 /* copy part of packet to first area */
782 rte_pktmbuf_mtod(mbuf, const void *),
786 dst = lck->next->start;
788 /* copy part of packet to second area */
790 (const void *)(rte_pktmbuf_mtod(mbuf,
792 write_len), pkt_len - write_len);
794 /* scattered packet, transmit from more mbufs */
795 struct rte_mbuf *m = mbuf;
796 uint16_t written = 0;
797 uint16_t to_write = 0;
798 bool new_mbuf = true;
799 uint16_t write_off = 0;
801 /* copy part of packet to first area */
802 while (m && written < write_len) {
803 to_write = RTE_MIN(m->data_len,
804 write_len - written);
810 tmp_dst = ((uint8_t *)(tmp_dst)) +
812 if (m->data_len <= write_len -
823 dst = lck->next->start;
/* Resume from mid-mbuf if the area boundary split a segment. */
827 write_off = new_mbuf ? 0 : to_write;
829 /* copy part of packet to second area */
830 while (m && written < pkt_len - write_len) {
831 rte_memcpy(tmp_dst, (const void *)
833 uint8_t *) + write_off),
834 m->data_len - write_off);
836 tmp_dst = ((uint8_t *)(tmp_dst)) +
837 (m->data_len - write_off);
838 written += m->data_len - write_off;
844 dst = ((uint8_t *)dst) + rem_len;
845 unlock_size += hwpkt_len;
846 lock_size = lock_size2 - rem_len;
849 rte_pktmbuf_free(mbuf);
853 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
/* Publish statistics for this burst. */
857 sze_q->tx_pkts += num_tx;
858 sze_q->err_pkts += nb_pkts - num_tx;
859 sze_q->tx_bytes += num_bytes;
/*
 * Start one RX queue: open the szedata2 device if this queue has no handle
 * yet, subscribe its RX channel, start the channel and mark the queue
 * state STARTED. On failure the handle is closed (error path at the end).
 */
864 eth_rx_queue_start(struct rte_eth_dev *dev, uint16_t rxq_id)
866 struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
868 struct pmd_internals *internals = (struct pmd_internals *)
869 dev->data->dev_private;
871 if (rxq->sze == NULL) {
872 uint32_t rx = 1 << rxq->rx_channel;
874 rxq->sze = szedata_open(internals->sze_dev);
875 if (rxq->sze == NULL)
/* Subscription must return a non-zero RX mask to be usable. */
877 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
878 if (ret != 0 || rx == 0)
882 ret = szedata_start(rxq->sze);
885 dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STARTED;
/* Error path: release the half-initialized handle. */
889 szedata_close(rxq->sze);
/*
 * Stop one RX queue: close its szedata handle (if open) and mark the
 * queue state STOPPED.
 */
895 eth_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rxq_id)
897 struct szedata2_rx_queue *rxq = dev->data->rx_queues[rxq_id];
899 if (rxq->sze != NULL) {
900 szedata_close(rxq->sze);
904 dev->data->rx_queue_state[rxq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
/*
 * Start one TX queue; mirrors eth_rx_queue_start() for the TX channel:
 * open the device if needed, subscribe the TX channel, start it and mark
 * the queue STARTED, closing the handle on failure.
 */
909 eth_tx_queue_start(struct rte_eth_dev *dev, uint16_t txq_id)
911 struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
913 struct pmd_internals *internals = (struct pmd_internals *)
914 dev->data->dev_private;
916 if (txq->sze == NULL) {
918 uint32_t tx = 1 << txq->tx_channel;
919 txq->sze = szedata_open(internals->sze_dev);
920 if (txq->sze == NULL)
/* Subscription must return a non-zero TX mask to be usable. */
922 ret = szedata_subscribe3(txq->sze, &rx, &tx);
923 if (ret != 0 || tx == 0)
927 ret = szedata_start(txq->sze);
930 dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STARTED;
/* Error path: release the half-initialized handle. */
934 szedata_close(txq->sze);
/*
 * Stop one TX queue: close its szedata handle (if open) and mark the
 * queue state STOPPED.
 */
940 eth_tx_queue_stop(struct rte_eth_dev *dev, uint16_t txq_id)
942 struct szedata2_tx_queue *txq = dev->data->tx_queues[txq_id];
944 if (txq->sze != NULL) {
945 szedata_close(txq->sze);
949 dev->data->tx_queue_state[txq_id] = RTE_ETH_QUEUE_STATE_STOPPED;
/*
 * dev_start callback: start every configured RX and TX queue. If any
 * queue fails to start, all queues are stopped again (rollback loops at
 * the end).
 */
954 eth_dev_start(struct rte_eth_dev *dev)
958 uint16_t nb_rx = dev->data->nb_rx_queues;
959 uint16_t nb_tx = dev->data->nb_tx_queues;
961 for (i = 0; i < nb_rx; i++) {
962 ret = eth_rx_queue_start(dev, i);
967 for (i = 0; i < nb_tx; i++) {
968 ret = eth_tx_queue_start(dev, i);
/* Rollback on failure: stop everything that was started. */
976 for (i = 0; i < nb_tx; i++)
977 eth_tx_queue_stop(dev, i);
979 for (i = 0; i < nb_rx; i++)
980 eth_rx_queue_stop(dev, i);
/*
 * dev_stop callback: stop all TX queues, then all RX queues.
 */
985 eth_dev_stop(struct rte_eth_dev *dev)
988 uint16_t nb_rx = dev->data->nb_rx_queues;
989 uint16_t nb_tx = dev->data->nb_tx_queues;
991 for (i = 0; i < nb_tx; i++)
992 eth_tx_queue_stop(dev, i);
994 for (i = 0; i < nb_rx; i++)
995 eth_rx_queue_stop(dev, i);
/*
 * dev_configure callback: select the RX burst function according to the
 * DEV_RX_OFFLOAD_SCATTER offload flag and record scattered_rx state.
 */
999 eth_dev_configure(struct rte_eth_dev *dev)
1001 struct rte_eth_dev_data *data = dev->data;
1002 if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
1003 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1004 data->scattered_rx = 1;
1006 dev->rx_pkt_burst = eth_szedata2_rx;
1007 data->scattered_rx = 0;
/*
 * dev_infos_get callback: report static device capabilities. Queue limits
 * come from the DMA channel counts discovered at init; the only RX
 * offload advertised is scatter; link speed capability is 100G.
 */
1013 eth_dev_info(struct rte_eth_dev *dev,
1014 struct rte_eth_dev_info *dev_info)
1016 struct pmd_internals *internals = dev->data->dev_private;
1018 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1019 dev_info->if_index = 0;
1020 dev_info->max_mac_addrs = 1;
1021 dev_info->max_rx_pktlen = (uint32_t)-1;
1022 dev_info->max_rx_queues = internals->max_rx_queues;
1023 dev_info->max_tx_queues = internals->max_tx_queues;
1024 dev_info->min_rx_bufsize = 0;
1025 dev_info->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
1026 dev_info->tx_offload_capa = 0;
1027 dev_info->rx_queue_offload_capa = 0;
1028 dev_info->tx_queue_offload_capa = 0;
1029 dev_info->speed_capa = ETH_LINK_SPEED_100G;
/*
 * stats_get callback: aggregate the per-queue software counters into the
 * rte_eth_stats structure. Per-queue entries are filled only for the
 * first RTE_ETHDEV_QUEUE_STAT_CNTRS queues; totals cover all queues.
 */
1033 eth_stats_get(struct rte_eth_dev *dev,
1034 struct rte_eth_stats *stats)
1037 uint16_t nb_rx = dev->data->nb_rx_queues;
1038 uint16_t nb_tx = dev->data->nb_tx_queues;
1039 uint64_t rx_total = 0;
1040 uint64_t tx_total = 0;
1041 uint64_t tx_err_total = 0;
1042 uint64_t rx_total_bytes = 0;
1043 uint64_t tx_total_bytes = 0;
1045 for (i = 0; i < nb_rx; i++) {
1046 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1048 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1049 stats->q_ipackets[i] = rxq->rx_pkts;
1050 stats->q_ibytes[i] = rxq->rx_bytes;
1052 rx_total += rxq->rx_pkts;
1053 rx_total_bytes += rxq->rx_bytes;
1056 for (i = 0; i < nb_tx; i++) {
1057 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
1059 if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
1060 stats->q_opackets[i] = txq->tx_pkts;
1061 stats->q_obytes[i] = txq->tx_bytes;
1062 stats->q_errors[i] = txq->err_pkts;
1064 tx_total += txq->tx_pkts;
1065 tx_total_bytes += txq->tx_bytes;
1066 tx_err_total += txq->err_pkts;
1069 stats->ipackets = rx_total;
1070 stats->opackets = tx_total;
1071 stats->ibytes = rx_total_bytes;
1072 stats->obytes = tx_total_bytes;
1073 stats->oerrors = tx_err_total;
1074 stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed;
/*
 * stats_reset callback: iterate all RX and TX queues and clear their
 * software counters (the clearing statements fall outside this chunk).
 */
1080 eth_stats_reset(struct rte_eth_dev *dev)
1083 uint16_t nb_rx = dev->data->nb_rx_queues;
1084 uint16_t nb_tx = dev->data->nb_tx_queues;
1086 for (i = 0; i < nb_rx; i++) {
1087 struct szedata2_rx_queue *rxq = dev->data->rx_queues[i];
1092 for (i = 0; i < nb_tx; i++) {
1093 struct szedata2_tx_queue *txq = dev->data->tx_queues[i];
/*
 * rx_queue_release callback: close the queue's szedata handle and free
 * the queue structure (the free falls outside this chunk).
 */
1101 eth_rx_queue_release(void *q)
1103 struct szedata2_rx_queue *rxq = (struct szedata2_rx_queue *)q;
1106 if (rxq->sze != NULL)
1107 szedata_close(rxq->sze);
/*
 * tx_queue_release callback: close the queue's szedata handle and free
 * the queue structure (the free falls outside this chunk).
 */
1113 eth_tx_queue_release(void *q)
1115 struct szedata2_tx_queue *txq = (struct szedata2_tx_queue *)q;
1118 if (txq->sze != NULL)
1119 szedata_close(txq->sze);
/*
 * dev_close callback: release every RX and TX queue and reset the queue
 * counts to zero.
 */
1125 eth_dev_close(struct rte_eth_dev *dev)
1128 uint16_t nb_rx = dev->data->nb_rx_queues;
1129 uint16_t nb_tx = dev->data->nb_tx_queues;
1133 for (i = 0; i < nb_rx; i++) {
1134 eth_rx_queue_release(dev->data->rx_queues[i]);
1135 dev->data->rx_queues[i] = NULL;
1137 dev->data->nb_rx_queues = 0;
1138 for (i = 0; i < nb_tx; i++) {
1139 eth_tx_queue_release(dev->data->tx_queues[i]);
1140 dev->data->tx_queues[i] = NULL;
1142 dev->data->nb_tx_queues = 0;
1146 * Function takes value from first IBUF status register.
1147 * Values in IBUF and OBUF should be same.
1150 * Pointer to device private structure.
1152 * Link speed constant.
1154 static inline enum szedata2_link_speed
1155 get_link_speed(const struct pmd_internals *internals)
1157 const volatile struct szedata2_ibuf *ibuf =
1158 ibuf_ptr_by_index(internals->pci_rsc, 0);
/* Bits [6:4] of the IBUF status register encode the link speed. */
1159 uint32_t speed = (szedata2_read32(&ibuf->ibuf_st) & 0x70) >> 4;
1162 return SZEDATA2_LINK_SPEED_10G;
1164 return SZEDATA2_LINK_SPEED_40G;
1166 return SZEDATA2_LINK_SPEED_100G;
/* Unknown encoding: fall back to the default speed constant. */
1168 return SZEDATA2_LINK_SPEED_DEFAULT;
/*
 * link_update callback: derive link speed from the IBUF status register
 * and report the link as up if at least one IBUF is enabled and reports
 * link-up. szedata2 is always full duplex with fixed (non-autoneg) speed.
 */
1173 eth_link_update(struct rte_eth_dev *dev,
1174 int wait_to_complete __rte_unused)
1176 struct rte_eth_link link;
1177 struct pmd_internals *internals = (struct pmd_internals *)
1178 dev->data->dev_private;
1179 const volatile struct szedata2_ibuf *ibuf;
1181 bool link_is_up = false;
1183 memset(&link, 0, sizeof(link));
1185 switch (get_link_speed(internals)) {
1186 case SZEDATA2_LINK_SPEED_10G:
1187 link.link_speed = ETH_SPEED_NUM_10G;
1189 case SZEDATA2_LINK_SPEED_40G:
1190 link.link_speed = ETH_SPEED_NUM_40G;
1192 case SZEDATA2_LINK_SPEED_100G:
1193 link.link_speed = ETH_SPEED_NUM_100G;
/* Default case: report 10G when the speed encoding is unknown. */
1196 link.link_speed = ETH_SPEED_NUM_10G;
1200 /* szedata2 uses only full duplex */
1201 link.link_duplex = ETH_LINK_FULL_DUPLEX;
1203 for (i = 0; i < szedata2_ibuf_count; i++) {
1204 ibuf = ibuf_ptr_by_index(internals->pci_rsc, i);
1206 * Link is considered up if at least one ibuf is enabled
1209 if (ibuf_is_enabled(ibuf) && ibuf_is_link_up(ibuf)) {
1215 link.link_status = link_is_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1217 link.link_autoneg = ETH_LINK_FIXED;
1219 rte_eth_linkstatus_set(dev, &link);
/*
 * dev_set_link_up callback: enable every IBUF (RX side) and OBUF
 * (TX side) unit on the adapter.
 */
1224 eth_dev_set_link_up(struct rte_eth_dev *dev)
1226 struct pmd_internals *internals = (struct pmd_internals *)
1227 dev->data->dev_private;
1230 for (i = 0; i < szedata2_ibuf_count; i++)
1231 ibuf_enable(ibuf_ptr_by_index(internals->pci_rsc, i));
1232 for (i = 0; i < szedata2_obuf_count; i++)
1233 obuf_enable(obuf_ptr_by_index(internals->pci_rsc, i));
/*
 * dev_set_link_down callback: disable every IBUF and OBUF unit on the
 * adapter (inverse of eth_dev_set_link_up).
 */
1238 eth_dev_set_link_down(struct rte_eth_dev *dev)
1240 struct pmd_internals *internals = (struct pmd_internals *)
1241 dev->data->dev_private;
1244 for (i = 0; i < szedata2_ibuf_count; i++)
1245 ibuf_disable(ibuf_ptr_by_index(internals->pci_rsc, i));
1246 for (i = 0; i < szedata2_obuf_count; i++)
1247 obuf_disable(obuf_ptr_by_index(internals->pci_rsc, i));
/*
 * rx_queue_setup callback: (re)create one RX queue. Releases any queue
 * already present at this index, allocates the queue structure on the
 * requested NUMA socket, opens the szedata2 device and subscribes the RX
 * channel matching the queue id. On any failure the partially built
 * queue is released via eth_rx_queue_release().
 */
1252 eth_rx_queue_setup(struct rte_eth_dev *dev,
1253 uint16_t rx_queue_id,
1254 uint16_t nb_rx_desc __rte_unused,
1255 unsigned int socket_id,
1256 const struct rte_eth_rxconf *rx_conf __rte_unused,
1257 struct rte_mempool *mb_pool)
1259 struct pmd_internals *internals = dev->data->dev_private;
1260 struct szedata2_rx_queue *rxq;
1262 uint32_t rx = 1 << rx_queue_id;
1265 if (dev->data->rx_queues[rx_queue_id] != NULL) {
1266 eth_rx_queue_release(dev->data->rx_queues[rx_queue_id]);
1267 dev->data->rx_queues[rx_queue_id] = NULL;
1270 rxq = rte_zmalloc_socket("szedata2 rx queue",
1271 sizeof(struct szedata2_rx_queue),
1272 RTE_CACHE_LINE_SIZE, socket_id);
1274 PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for rx queue id "
1275 "%" PRIu16 "!", rx_queue_id);
1279 rxq->priv = internals;
1280 rxq->sze = szedata_open(internals->sze_dev);
1281 if (rxq->sze == NULL) {
1282 PMD_INIT_LOG(ERR, "szedata_open() failed for rx queue id "
1283 "%" PRIu16 "!", rx_queue_id);
1284 eth_rx_queue_release(rxq);
1287 ret = szedata_subscribe3(rxq->sze, &rx, &tx);
1288 if (ret != 0 || rx == 0) {
1289 PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for rx queue id "
1290 "%" PRIu16 "!", rx_queue_id);
1291 eth_rx_queue_release(rxq);
/* Queue id doubles as the DMA channel number and ingress port id. */
1294 rxq->rx_channel = rx_queue_id;
1295 rxq->in_port = dev->data->port_id;
1296 rxq->mb_pool = mb_pool;
1301 dev->data->rx_queues[rx_queue_id] = rxq;
1303 PMD_INIT_LOG(DEBUG, "Configured rx queue id %" PRIu16 " on socket "
1304 "%u.", rx_queue_id, socket_id);
/*
 * tx_queue_setup callback: (re)create one TX queue; mirrors
 * eth_rx_queue_setup() for the TX channel. On any failure the partially
 * built queue is released via eth_tx_queue_release().
 */
1310 eth_tx_queue_setup(struct rte_eth_dev *dev,
1311 uint16_t tx_queue_id,
1312 uint16_t nb_tx_desc __rte_unused,
1313 unsigned int socket_id,
1314 const struct rte_eth_txconf *tx_conf __rte_unused)
1316 struct pmd_internals *internals = dev->data->dev_private;
1317 struct szedata2_tx_queue *txq;
1320 uint32_t tx = 1 << tx_queue_id;
1322 if (dev->data->tx_queues[tx_queue_id] != NULL) {
1323 eth_tx_queue_release(dev->data->tx_queues[tx_queue_id]);
1324 dev->data->tx_queues[tx_queue_id] = NULL;
1327 txq = rte_zmalloc_socket("szedata2 tx queue",
1328 sizeof(struct szedata2_tx_queue),
1329 RTE_CACHE_LINE_SIZE, socket_id);
1331 PMD_INIT_LOG(ERR, "rte_zmalloc_socket() failed for tx queue id "
1332 "%" PRIu16 "!", tx_queue_id);
1336 txq->priv = internals;
1337 txq->sze = szedata_open(internals->sze_dev);
1338 if (txq->sze == NULL) {
1339 PMD_INIT_LOG(ERR, "szedata_open() failed for tx queue id "
1340 "%" PRIu16 "!", tx_queue_id);
1341 eth_tx_queue_release(txq);
1344 ret = szedata_subscribe3(txq->sze, &rx, &tx);
1345 if (ret != 0 || tx == 0) {
1346 PMD_INIT_LOG(ERR, "szedata_subscribe3() failed for tx queue id "
1347 "%" PRIu16 "!", tx_queue_id);
1348 eth_tx_queue_release(txq);
/* Queue id doubles as the DMA channel number. */
1351 txq->tx_channel = tx_queue_id;
1356 dev->data->tx_queues[tx_queue_id] = txq;
1358 PMD_INIT_LOG(DEBUG, "Configured tx queue id %" PRIu16 " on socket "
1359 "%u.", tx_queue_id, socket_id);
/*
 * mac_addr_set callback: intentional no-op — both parameters are marked
 * __rte_unused; the device exposes a single fixed MAC address (eth_addr).
 */
1365 eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
1366 struct ether_addr *mac_addr __rte_unused)
/*
 * promiscuous_enable callback: switch every IBUF MAC filter to
 * promiscuous mode.
 */
1371 eth_promiscuous_enable(struct rte_eth_dev *dev)
1373 struct pmd_internals *internals = (struct pmd_internals *)
1374 dev->data->dev_private;
1377 for (i = 0; i < szedata2_ibuf_count; i++) {
1378 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1379 SZEDATA2_MAC_CHMODE_PROMISC);
/*
 * promiscuous_disable callback: restore every IBUF MAC filter to
 * only-valid mode (accept valid addresses only).
 */
1384 eth_promiscuous_disable(struct rte_eth_dev *dev)
1386 struct pmd_internals *internals = (struct pmd_internals *)
1387 dev->data->dev_private;
1390 for (i = 0; i < szedata2_ibuf_count; i++) {
1391 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1392 SZEDATA2_MAC_CHMODE_ONLY_VALID);
/*
 * allmulticast_enable callback: switch every IBUF MAC filter to
 * all-multicast mode.
 */
1397 eth_allmulticast_enable(struct rte_eth_dev *dev)
1399 struct pmd_internals *internals = (struct pmd_internals *)
1400 dev->data->dev_private;
1403 for (i = 0; i < szedata2_ibuf_count; i++) {
1404 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1405 SZEDATA2_MAC_CHMODE_ALL_MULTICAST);
/*
 * allmulticast_disable callback: restore every IBUF MAC filter to
 * only-valid mode. NOTE(review): this also cancels promiscuous mode,
 * since the same register holds both states — confirm intended.
 */
1410 eth_allmulticast_disable(struct rte_eth_dev *dev)
1412 struct pmd_internals *internals = (struct pmd_internals *)
1413 dev->data->dev_private;
1416 for (i = 0; i < szedata2_ibuf_count; i++) {
1417 ibuf_mac_mode_write(ibuf_ptr_by_index(internals->pci_rsc, i),
1418 SZEDATA2_MAC_CHMODE_ONLY_VALID);
/* ethdev callback table wiring the functions above into rte_ethdev. */
1422 static const struct eth_dev_ops ops = {
1423 .dev_start = eth_dev_start,
1424 .dev_stop = eth_dev_stop,
1425 .dev_set_link_up = eth_dev_set_link_up,
1426 .dev_set_link_down = eth_dev_set_link_down,
1427 .dev_close = eth_dev_close,
1428 .dev_configure = eth_dev_configure,
1429 .dev_infos_get = eth_dev_info,
1430 .promiscuous_enable = eth_promiscuous_enable,
1431 .promiscuous_disable = eth_promiscuous_disable,
1432 .allmulticast_enable = eth_allmulticast_enable,
1433 .allmulticast_disable = eth_allmulticast_disable,
1434 .rx_queue_start = eth_rx_queue_start,
1435 .rx_queue_stop = eth_rx_queue_stop,
1436 .tx_queue_start = eth_tx_queue_start,
1437 .tx_queue_stop = eth_tx_queue_stop,
1438 .rx_queue_setup = eth_rx_queue_setup,
1439 .tx_queue_setup = eth_tx_queue_setup,
1440 .rx_queue_release = eth_rx_queue_release,
1441 .tx_queue_release = eth_tx_queue_release,
1442 .link_update = eth_link_update,
1443 .stats_get = eth_stats_get,
1444 .stats_reset = eth_stats_reset,
1445 .mac_addr_set = eth_mac_addr_set,
1449 * This function goes through sysfs and looks for an index of szedata2
1450 * device file (/dev/szedataIIX, where X is the index).
1457 get_szedata2_index(const struct rte_pci_addr *pcislot_addr, uint32_t *index)
1460 struct dirent *entry;
1464 char pcislot_path[PATH_MAX];
1470 dir = opendir("/sys/class/combo");
1475 * Iterate through all combosixX directories.
1476 * When the value in /sys/class/combo/combosixX/device/pcislot
1477 * file is the location of the ethernet device dev, "X" is the
1478 * index of the device.
1480 while ((entry = readdir(dir)) != NULL) {
/* Only entries named "combosix<N>" are candidates. */
1481 ret = sscanf(entry->d_name, "combosix%u", &tmp_index);
1485 snprintf(pcislot_path, PATH_MAX,
1486 "/sys/class/combo/combosix%u/device/pcislot",
1489 fd = fopen(pcislot_path, "r");
/* Parse "domain:bus:devid.function" from the pcislot file. */
1493 ret = fscanf(fd, "%8" SCNx32 ":%2" SCNx8 ":%2" SCNx8 ".%" SCNx8,
1494 &domain, &bus, &devid, &function);
/* Match against the PCI address of the probed device. */
1499 if (pcislot_addr->domain == domain &&
1500 pcislot_addr->bus == bus &&
1501 pcislot_addr->devid == devid &&
1502 pcislot_addr->function == function) {
1514 rte_szedata2_eth_dev_init(struct rte_eth_dev *dev)
1516 struct rte_eth_dev_data *data = dev->data;
1517 struct pmd_internals *internals = (struct pmd_internals *)
1519 struct szedata *szedata_temp;
1521 uint32_t szedata2_index;
1522 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1523 struct rte_pci_addr *pci_addr = &pci_dev->addr;
1524 struct rte_mem_resource *pci_rsc =
1525 &pci_dev->mem_resource[PCI_RESOURCE_NUMBER];
1526 char rsc_filename[PATH_MAX];
1527 void *pci_resource_ptr = NULL;
1530 PMD_INIT_LOG(INFO, "Initializing szedata2 device (" PCI_PRI_FMT ")",
1531 pci_addr->domain, pci_addr->bus, pci_addr->devid,
1532 pci_addr->function);
1534 internals->dev = dev;
1536 /* Get index of szedata2 device file and create path to device file */
1537 ret = get_szedata2_index(pci_addr, &szedata2_index);
1539 PMD_INIT_LOG(ERR, "Failed to get szedata2 device index!");
1542 snprintf(internals->sze_dev, PATH_MAX, SZEDATA2_DEV_PATH_FMT,
1545 PMD_INIT_LOG(INFO, "SZEDATA2 path: %s", internals->sze_dev);
1548 * Get number of available DMA RX and TX channels, which is maximum
1549 * number of queues that can be created and store it in private device
1552 szedata_temp = szedata_open(internals->sze_dev);
1553 if (szedata_temp == NULL) {
1554 PMD_INIT_LOG(ERR, "szedata_open(): failed to open %s",
1555 internals->sze_dev);
1558 internals->max_rx_queues = szedata_ifaces_available(szedata_temp,
1560 internals->max_tx_queues = szedata_ifaces_available(szedata_temp,
1562 szedata_close(szedata_temp);
1564 PMD_INIT_LOG(INFO, "Available DMA channels RX: %u TX: %u",
1565 internals->max_rx_queues, internals->max_tx_queues);
1567 /* Set rx, tx burst functions */
1568 if (data->scattered_rx == 1)
1569 dev->rx_pkt_burst = eth_szedata2_rx_scattered;
1571 dev->rx_pkt_burst = eth_szedata2_rx;
1572 dev->tx_pkt_burst = eth_szedata2_tx;
1574 /* Set function callbacks for Ethernet API */
1575 dev->dev_ops = &ops;
1577 rte_eth_copy_pci_info(dev, pci_dev);
1579 /* mmap pci resource0 file to rte_mem_resource structure */
1580 if (pci_dev->mem_resource[PCI_RESOURCE_NUMBER].phys_addr ==
1582 PMD_INIT_LOG(ERR, "Missing resource%u file",
1583 PCI_RESOURCE_NUMBER);
1586 snprintf(rsc_filename, PATH_MAX,
1587 "%s/" PCI_PRI_FMT "/resource%u", rte_pci_get_sysfs_path(),
1588 pci_addr->domain, pci_addr->bus,
1589 pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER);
1590 fd = open(rsc_filename, O_RDWR);
1592 PMD_INIT_LOG(ERR, "Could not open file %s", rsc_filename);
1596 pci_resource_ptr = mmap(0,
1597 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len,
1598 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1600 if (pci_resource_ptr == MAP_FAILED) {
1601 PMD_INIT_LOG(ERR, "Could not mmap file %s (fd = %d)",
1605 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr = pci_resource_ptr;
1606 internals->pci_rsc = pci_rsc;
1608 PMD_INIT_LOG(DEBUG, "resource%u phys_addr = 0x%llx len = %llu "
1609 "virt addr = %llx", PCI_RESOURCE_NUMBER,
1610 (unsigned long long)pci_rsc->phys_addr,
1611 (unsigned long long)pci_rsc->len,
1612 (unsigned long long)pci_rsc->addr);
1614 /* Get link state */
1615 eth_link_update(dev, 0);
1617 /* Allocate space for one mac address */
1618 data->mac_addrs = rte_zmalloc(data->name, sizeof(struct ether_addr),
1619 RTE_CACHE_LINE_SIZE);
1620 if (data->mac_addrs == NULL) {
1621 PMD_INIT_LOG(ERR, "Could not alloc space for MAC address!");
1622 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1623 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1627 ether_addr_copy(ð_addr, data->mac_addrs);
1629 /* At initial state COMBO card is in promiscuous mode so disable it */
1630 eth_promiscuous_disable(dev);
1632 PMD_INIT_LOG(INFO, "szedata2 device ("
1633 PCI_PRI_FMT ") successfully initialized",
1634 pci_addr->domain, pci_addr->bus, pci_addr->devid,
1635 pci_addr->function);
1641 rte_szedata2_eth_dev_uninit(struct rte_eth_dev *dev)
1643 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1644 struct rte_pci_addr *pci_addr = &pci_dev->addr;
1646 rte_free(dev->data->mac_addrs);
1647 dev->data->mac_addrs = NULL;
1648 munmap(pci_dev->mem_resource[PCI_RESOURCE_NUMBER].addr,
1649 pci_dev->mem_resource[PCI_RESOURCE_NUMBER].len);
1651 PMD_DRV_LOG(INFO, "szedata2 device ("
1652 PCI_PRI_FMT ") successfully uninitialized",
1653 pci_addr->domain, pci_addr->bus, pci_addr->devid,
1654 pci_addr->function);
1659 static const struct rte_pci_id rte_szedata2_pci_id_table[] = {
1661 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1662 PCI_DEVICE_ID_NETCOPE_COMBO80G)
1665 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1666 PCI_DEVICE_ID_NETCOPE_COMBO100G)
1669 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETCOPE,
1670 PCI_DEVICE_ID_NETCOPE_COMBO100G2)
1677 static int szedata2_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1678 struct rte_pci_device *pci_dev)
1680 return rte_eth_dev_pci_generic_probe(pci_dev,
1681 sizeof(struct pmd_internals), rte_szedata2_eth_dev_init);
1684 static int szedata2_eth_pci_remove(struct rte_pci_device *pci_dev)
1686 return rte_eth_dev_pci_generic_remove(pci_dev,
1687 rte_szedata2_eth_dev_uninit);
1690 static struct rte_pci_driver szedata2_eth_driver = {
1691 .id_table = rte_szedata2_pci_id_table,
1692 .probe = szedata2_eth_pci_probe,
1693 .remove = szedata2_eth_pci_remove,
1696 RTE_PMD_REGISTER_PCI(RTE_SZEDATA2_DRIVER_NAME, szedata2_eth_driver);
1697 RTE_PMD_REGISTER_PCI_TABLE(RTE_SZEDATA2_DRIVER_NAME, rte_szedata2_pci_id_table);
1698 RTE_PMD_REGISTER_KMOD_DEP(RTE_SZEDATA2_DRIVER_NAME,
1699 "* combo6core & combov3 & szedata2 & szedata2_cv3");
1701 RTE_INIT(szedata2_init_log);
1703 szedata2_init_log(void)
1705 szedata2_logtype_init = rte_log_register("pmd.net.szedata2.init");
1706 if (szedata2_logtype_init >= 0)
1707 rte_log_set_level(szedata2_logtype_init, RTE_LOG_NOTICE);
1708 szedata2_logtype_driver = rte_log_register("pmd.net.szedata2.driver");
1709 if (szedata2_logtype_driver >= 0)
1710 rte_log_set_level(szedata2_logtype_driver, RTE_LOG_NOTICE);