4 * Copyright (c) 2015 CESNET
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of CESNET nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42 #include <rte_ethdev.h>
43 #include <rte_malloc.h>
44 #include <rte_memcpy.h>
45 #include <rte_kvargs.h>
48 #include "rte_eth_szedata2.h"
/* kvargs keys accepted by this PMD: device path and rx/tx channel masks. */
50 #define RTE_ETH_SZEDATA2_DEV_PATH_ARG "dev_path"
51 #define RTE_ETH_SZEDATA2_RX_IFACES_ARG "rx_ifaces"
52 #define RTE_ETH_SZEDATA2_TX_IFACES_ARG "tx_ifaces"
/* Upper bounds on the number of rx/tx queues handled by this PMD. */
54 #define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
55 #define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
/* Size (32 MiB) of the TX area requested via szedata_tx_lock_data(). */
56 #define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)
59 * size of szedata2_packet header with alignment
61 #define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
/*
 * Per-RX-queue state: the mempool mbufs are allocated from plus
 * volatile counters updated by the RX burst routine.
 * NOTE(review): members referenced elsewhere in this file (sze handle,
 * rx_channel, in_port) are not visible in this excerpt — confirm in full file.
 */
63 struct szedata2_rx_queue {
67 struct rte_mempool *mb_pool; /* mempool used by eth_szedata2_rx() */
68 volatile uint64_t rx_pkts; /* successfully received packets */
69 volatile uint64_t rx_bytes; /* bytes of received packet data */
70 volatile uint64_t err_pkts; /* error counter */
/*
 * Per-TX-queue state: volatile counters updated by the TX burst routine.
 * NOTE(review): the sze handle member used by eth_szedata2_tx() is not
 * visible in this excerpt — confirm in full file.
 */
73 struct szedata2_tx_queue {
76 volatile uint64_t tx_pkts; /* successfully transmitted packets */
77 volatile uint64_t err_pkts; /* packets that could not be sent */
78 volatile uint64_t tx_bytes; /* bytes of transmitted packet data */
/*
 * Aggregated rx/tx channel requests parsed from device arguments;
 * filled by add_rx_mask()/add_tx_mask() and consumed by
 * rte_eth_from_szedata2().
 */
81 struct rxtx_szedata2 {
84 uint32_t sze_rx_mask_req; /* OR of all requested rx channel masks */
85 uint32_t sze_tx_mask_req; /* OR of all requested tx channel masks */
/*
 * Private per-device data: fixed-size queue arrays plus the number of
 * queues currently subscribed.
 */
89 struct pmd_internals {
90 struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES];
91 struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES];
92 unsigned nb_rx_queues; /* number of successfully opened rx queues */
93 unsigned nb_tx_queues; /* number of successfully opened tx queues */
/* kvargs keys recognized when parsing device arguments. */
102 static const char *valid_arguments[] = {
103 RTE_ETH_SZEDATA2_DEV_PATH_ARG,
104 RTE_ETH_SZEDATA2_RX_IFACES_ARG,
105 RTE_ETH_SZEDATA2_TX_IFACES_ARG,
/* Fixed MAC address reported for every szedata2 port. */
109 static struct ether_addr eth_addr = {
110 .addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
112 static const char *drivername = "SZEdata2 PMD";
/* Static link state advertised by the PMD (10G, full duplex). */
113 static struct rte_eth_link pmd_link = {
114 .link_speed = ETH_LINK_SPEED_10G,
115 .link_duplex = ETH_LINK_FULL_DUPLEX,
/*
 * Population count: return the number of bits set in @num.
 * Classic parallel bit-count (SWAR) algorithm; used to turn channel
 * bit masks into channel counts.
 */
121 count_ones(uint32_t num)
123 num = num - ((num >> 1) & 0x55555555); /* reuse input as temporary */
124 num = (num & 0x33333333) + ((num >> 2) & 0x33333333); /* temp */
125 return (((num + (num >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; /* count */
/*
 * RX burst routine.
 * Pulls packets from the szedata2 channel bound to @queue, copies each
 * packet's payload into a freshly allocated mbuf from the queue's mempool
 * and stores the mbufs into @bufs. A packet larger than the mbuf data room
 * is dropped (scattered RX is not supported here). Updates the queue's
 * rx_pkts/rx_bytes counters.
 */
129 eth_szedata2_rx(void *queue,
130 struct rte_mbuf **bufs,
134 struct rte_mbuf *mbuf;
135 struct szedata2_rx_queue *sze_q = queue;
136 struct rte_pktmbuf_pool_private *mbp_priv;
141 uint16_t packet_size;
142 uint64_t num_bytes = 0;
143 struct szedata *sze = sze_q->sze;
144 uint8_t *header_ptr = NULL; /* header of packet */
145 uint8_t *packet_ptr1 = NULL;
146 uint8_t *packet_ptr2 = NULL;
147 uint16_t packet_len1 = 0;
148 uint16_t packet_len2 = 0;
149 uint16_t hw_data_align;
151 if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
155 * Reads the given number of packets from szedata2 channel given
156 * by queue and copies the packet data into a newly allocated mbuf
159 for (i = 0; i < nb_pkts; i++) {
160 mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);
162 if (unlikely(mbuf == NULL))
165 /* get the next sze packet */
166 if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
167 sze->ct_rx_lck->next == NULL) {
168 /* unlock old data */
169 szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
170 sze->ct_rx_lck_orig = NULL;
171 sze->ct_rx_lck = NULL;
174 if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
175 /* nothing to read, lock new data */
176 sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
177 sze->ct_rx_lck_orig = sze->ct_rx_lck;
179 if (sze->ct_rx_lck == NULL) {
180 /* nothing to lock */
181 rte_pktmbuf_free(mbuf);
185 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
186 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;
188 if (!sze->ct_rx_rem_bytes) {
189 rte_pktmbuf_free(mbuf);
/*
 * The 8-byte szedata2 packet header may be split across two lock
 * areas; if so, reassemble it into ct_rx_buffer first.
 */
194 if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
197 * copy parts of header to merge buffer
199 if (sze->ct_rx_lck->next == NULL) {
200 rte_pktmbuf_free(mbuf);
204 /* copy first part of header */
205 rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
206 sze->ct_rx_rem_bytes);
208 /* copy second part of header */
209 sze->ct_rx_lck = sze->ct_rx_lck->next;
210 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
211 rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
213 RTE_SZE2_PACKET_HEADER_SIZE -
214 sze->ct_rx_rem_bytes);
216 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
217 sze->ct_rx_rem_bytes;
218 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
219 RTE_SZE2_PACKET_HEADER_SIZE +
220 sze->ct_rx_rem_bytes;
222 header_ptr = (uint8_t *)sze->ct_rx_buffer;
225 header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
226 sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
227 sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
/* Header layout: first LE16 = segment size, second LE16 = hw size. */
230 sg_size = le16toh(*((uint16_t *)header_ptr));
231 hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
232 packet_size = sg_size -
233 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
236 /* sanity checks on the parsed segment */
238 errx(5, "Zero segsize");
240 /* check sg_size and hwsize */
241 if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
242 errx(10, "Hwsize bigger than expected. Segsize: %d, "
243 "hwsize: %d", sg_size, hw_size);
247 RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
248 RTE_SZE2_PACKET_HEADER_SIZE;
250 if (sze->ct_rx_rem_bytes >=
252 RTE_SZE2_PACKET_HEADER_SIZE)) {
254 /* one packet ready - go to another */
255 packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
256 packet_len1 = packet_size;
260 sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
261 RTE_SZE2_PACKET_HEADER_SIZE;
262 sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
263 RTE_SZE2_PACKET_HEADER_SIZE;
/* Packet data spans the current and the next lock area. */
266 if (sze->ct_rx_lck->next == NULL) {
267 errx(6, "Need \"next\" lock, "
268 "but it is missing: %u",
269 sze->ct_rx_rem_bytes);
/* Even the hw-data padding crosses the lock boundary. */
273 if (sze->ct_rx_rem_bytes <= hw_data_align) {
274 uint16_t rem_size = hw_data_align -
275 sze->ct_rx_rem_bytes;
277 /* MOVE to next lock */
278 sze->ct_rx_lck = sze->ct_rx_lck->next;
280 (void *)(((uint8_t *)
281 (sze->ct_rx_lck->start)) + rem_size);
283 packet_ptr1 = sze->ct_rx_cur_ptr;
284 packet_len1 = packet_size;
288 sze->ct_rx_cur_ptr +=
289 RTE_SZE2_ALIGN8(packet_size);
290 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
291 rem_size - RTE_SZE2_ALIGN8(packet_size);
293 /* get pointer and length from first part */
294 packet_ptr1 = sze->ct_rx_cur_ptr +
296 packet_len1 = sze->ct_rx_rem_bytes -
299 /* MOVE to next lock */
300 sze->ct_rx_lck = sze->ct_rx_lck->next;
301 sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
303 /* get pointer and length from second part */
304 packet_ptr2 = sze->ct_rx_cur_ptr;
305 packet_len2 = packet_size - packet_len1;
307 sze->ct_rx_cur_ptr +=
308 RTE_SZE2_ALIGN8(packet_size) -
310 sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
311 (RTE_SZE2_ALIGN8(packet_size) -
316 if (unlikely(packet_ptr1 == NULL)) {
317 rte_pktmbuf_free(mbuf);
321 /* get the space available for data in the mbuf */
322 mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
323 buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
324 RTE_PKTMBUF_HEADROOM);
326 if (packet_size <= buf_size) {
327 /* sze packet will fit in one mbuf, go ahead and copy */
328 rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
329 packet_ptr1, packet_len1);
330 if (packet_ptr2 != NULL) {
/* Second fragment goes right behind the first one. */
331 rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
332 uint8_t *) + packet_len1),
333 packet_ptr2, packet_len2);
335 mbuf->data_len = (uint16_t)packet_size;
337 mbuf->pkt_len = packet_size;
338 mbuf->port = sze_q->in_port;
341 num_bytes += packet_size;
344 * sze packet will not fit in one mbuf,
345 * scattered mode is not enabled, drop packet
348 "SZE segment %d bytes will not fit in one mbuf "
349 "(%d bytes), scattered mode is not enabled, "
351 packet_size, buf_size);
352 rte_pktmbuf_free(mbuf);
/* Publish statistics for this burst. */
356 sze_q->rx_pkts += num_rx;
357 sze_q->rx_bytes += num_bytes;
/*
 * TX burst routine.
 * Locks a TX buffer area of RTE_ETH_SZEDATA2_TX_LOCK_SIZE bytes from the
 * szedata2 channel, then for each mbuf writes an 8-byte szedata2 header
 * (LE16 total length, LE16 zero) followed by the packet data, handling
 * both single-segment and multi-segment mbufs and packets that straddle
 * the boundary between the two lock areas. Consumed mbufs are freed and
 * the queue's tx_pkts/err_pkts/tx_bytes counters are updated.
 */
362 eth_szedata2_tx(void *queue,
363 struct rte_mbuf **bufs,
366 struct rte_mbuf *mbuf;
367 struct szedata2_tx_queue *sze_q = queue;
369 uint64_t num_bytes = 0;
371 const struct szedata_lock *lck;
377 uint32_t unlock_size;
380 uint16_t pkt_left = nb_pkts;
382 if (sze_q->sze == NULL || nb_pkts == 0)
385 while (pkt_left > 0) {
387 lck = szedata_tx_lock_data(sze_q->sze,
388 RTE_ETH_SZEDATA2_TX_LOCK_SIZE,
/* The lock may come as up to two areas (ring wrap-around). */
394 lock_size = lck->len;
395 lock_size2 = lck->next ? lck->next->len : 0;
398 mbuf = bufs[nb_pkts - pkt_left];
400 pkt_len = mbuf->pkt_len;
401 mbuf_segs = mbuf->nb_segs;
/* On-wire length: aligned 8B header plus 8B-aligned payload. */
403 hwpkt_len = RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
404 RTE_SZE2_ALIGN8(pkt_len);
406 if (lock_size + lock_size2 < hwpkt_len) {
/* Not enough locked space left for this packet. */
407 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
411 num_bytes += pkt_len;
/* Case 1: whole packet fits into the first lock area. */
413 if (lock_size > hwpkt_len) {
418 /* write packet length at first 2 bytes in 8B header */
419 *((uint16_t *)dst) = htole16(
420 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
422 *(((uint16_t *)dst) + 1) = htole16(0);
424 /* copy packet from mbuf */
425 tmp_dst = ((uint8_t *)(dst)) +
426 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
427 if (mbuf_segs == 1) {
429 * non-scattered packet,
430 * transmit from one mbuf
433 rte_pktmbuf_mtod(mbuf, const void *),
436 /* scattered packet, transmit from more mbufs */
437 struct rte_mbuf *m = mbuf;
443 tmp_dst = ((uint8_t *)(tmp_dst)) +
450 dst = ((uint8_t *)dst) + hwpkt_len;
451 unlock_size += hwpkt_len;
452 lock_size -= hwpkt_len;
454 rte_pktmbuf_free(mbuf);
458 szedata_tx_unlock_data(sze_q->sze, lck,
/* Case 2: packet straddles the two lock areas. */
463 } else if (lock_size + lock_size2 >= hwpkt_len) {
467 /* write packet length at first 2 bytes in 8B header */
469 htole16(RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED +
471 *(((uint16_t *)dst) + 1) = htole16(0);
474 * If the raw packet (pkt_len) is smaller than lock_size
475 * get the correct length for memcpy
478 pkt_len < lock_size -
479 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED ?
481 lock_size - RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
483 rem_len = hwpkt_len - lock_size;
485 tmp_dst = ((uint8_t *)(dst)) +
486 RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED;
487 if (mbuf_segs == 1) {
489 * non-scattered packet,
490 * transmit from one mbuf
492 /* copy part of packet to first area */
494 rte_pktmbuf_mtod(mbuf, const void *),
498 dst = lck->next->start;
500 /* copy part of packet to second area */
502 (const void *)(rte_pktmbuf_mtod(mbuf,
504 write_len), pkt_len - write_len);
506 /* scattered packet, transmit from more mbufs */
507 struct rte_mbuf *m = mbuf;
508 uint16_t written = 0;
509 uint16_t to_write = 0;
510 bool new_mbuf = true;
511 uint16_t write_off = 0;
513 /* copy part of packet to first area */
514 while (m && written < write_len) {
515 to_write = RTE_MIN(m->data_len,
516 write_len - written);
522 tmp_dst = ((uint8_t *)(tmp_dst)) +
524 if (m->data_len <= write_len -
535 dst = lck->next->start;
/* Resume inside the segment that was split across areas. */
539 write_off = new_mbuf ? 0 : to_write;
541 /* copy part of packet to second area */
542 while (m && written < pkt_len - write_len) {
543 rte_memcpy(tmp_dst, (const void *)
545 uint8_t *) + write_off),
546 m->data_len - write_off);
548 tmp_dst = ((uint8_t *)(tmp_dst)) +
549 (m->data_len - write_off);
550 written += m->data_len - write_off;
556 dst = ((uint8_t *)dst) + rem_len;
557 unlock_size += hwpkt_len;
558 lock_size = lock_size2 - rem_len;
561 rte_pktmbuf_free(mbuf);
565 szedata_tx_unlock_data(sze_q->sze, lck, unlock_size);
/* Publish statistics for this burst. */
569 sze_q->tx_pkts += num_tx;
570 sze_q->err_pkts += nb_pkts - num_tx;
571 sze_q->tx_bytes += num_bytes;
/*
 * Open, subscribe and start one szedata2 RX channel per bit set in the
 * requested rx mask (internals->sze_rx_req). Each successfully opened
 * channel becomes one rx queue. On failure the partially opened queue
 * is closed. Updates internals->nb_rx_queues and dev->data->nb_rx_queues.
 * NOTE(review): the role of parameter @v is not visible in this excerpt.
 */
576 init_rx_channels(struct rte_eth_dev *dev, int v)
578 struct pmd_internals *internals = dev->data->dev_private;
581 uint32_t count = internals->num_of_rx;
582 uint32_t num_sub = 0;
587 rx = internals->sze_rx_req;
590 for (i = 0; i < count; i++) {
592 * Open, subscribe rx,tx channels and start device
595 RTE_LOG(INFO, PMD, "Opening SZE device %u. time\n", i);
597 internals->rx_queue[num_sub].sze =
598 szedata_open(internals->sze_dev);
599 if (internals->rx_queue[num_sub].sze == NULL)
602 /* separate least significant non-zero bit */
603 x = rx & ((~rx) + 1);
606 RTE_LOG(INFO, PMD, "Subscribing rx channel: 0x%x "
607 "tx channel: 0x%x\n", x, tx);
609 ret = szedata_subscribe3(internals->rx_queue[num_sub].sze,
612 szedata_close(internals->rx_queue[num_sub].sze);
613 internals->rx_queue[num_sub].sze = NULL;
618 RTE_LOG(INFO, PMD, "Subscribed rx channel: 0x%x "
619 "tx channel: 0x%x\n", x, tx);
623 RTE_LOG(INFO, PMD, "Starting SZE device for "
624 "rx queue: %u\n", num_sub);
626 ret = szedata_start(internals->rx_queue[num_sub].sze);
628 szedata_close(internals->rx_queue[num_sub].sze);
629 internals->rx_queue[num_sub].sze = NULL;
634 * set to 1 all bits lower than bit set to 1
638 internals->rx_queue[num_sub].rx_channel =
642 RTE_LOG(INFO, PMD, "Subscribed rx channel "
644 internals->rx_queue[num_sub].rx_channel
648 internals->nb_rx_queues = num_sub;
652 "Could not subscribe any rx channel. "
653 "Closing SZE device\n");
655 szedata_close(internals->rx_queue[num_sub].sze);
656 internals->rx_queue[num_sub].sze = NULL;
659 /* set least significant non-zero bit to zero */
663 dev->data->nb_rx_queues = (uint16_t)num_sub;
666 RTE_LOG(INFO, PMD, "Successfully opened rx channels: %u\n",
/*
 * TX counterpart of init_rx_channels(): open, subscribe and start one
 * szedata2 TX channel per bit set in internals->sze_tx_req; each becomes
 * one tx queue. Updates internals->nb_tx_queues and dev->data->nb_tx_queues.
 * NOTE(review): the role of parameter @v is not visible in this excerpt.
 */
673 init_tx_channels(struct rte_eth_dev *dev, int v)
675 struct pmd_internals *internals = dev->data->dev_private;
678 uint32_t count = internals->num_of_tx;
679 uint32_t num_sub = 0;
685 tx = internals->sze_tx_req;
687 for (i = 0; i < count; i++) {
689 * Open, subscribe rx,tx channels and start device
692 RTE_LOG(INFO, PMD, "Opening SZE device %u. time\n",
693 i + internals->num_of_rx);
695 internals->tx_queue[num_sub].sze =
696 szedata_open(internals->sze_dev);
697 if (internals->tx_queue[num_sub].sze == NULL)
700 /* separate least significant non-zero bit */
701 x = tx & ((~tx) + 1);
704 RTE_LOG(INFO, PMD, "Subscribing rx channel: 0x%x "
705 "tx channel: 0x%x\n", rx, x);
707 ret = szedata_subscribe3(internals->tx_queue[num_sub].sze,
710 szedata_close(internals->tx_queue[num_sub].sze);
711 internals->tx_queue[num_sub].sze = NULL;
716 RTE_LOG(INFO, PMD, "Subscribed rx channel: 0x%x "
717 "tx channel: 0x%x\n", rx, x);
721 RTE_LOG(INFO, PMD, "Starting SZE device for "
722 "tx queue: %u\n", num_sub);
724 ret = szedata_start(internals->tx_queue[num_sub].sze);
726 szedata_close(internals->tx_queue[num_sub].sze);
727 internals->tx_queue[num_sub].sze = NULL;
732 * set to 1 all bits lower than bit set to 1
736 internals->tx_queue[num_sub].tx_channel =
740 RTE_LOG(INFO, PMD, "Subscribed tx channel "
742 internals->tx_queue[num_sub].tx_channel
746 internals->nb_tx_queues = num_sub;
750 "Could not subscribe any tx channel. "
751 "Closing SZE device\n");
753 szedata_close(internals->tx_queue[num_sub].sze);
754 internals->tx_queue[num_sub].sze = NULL;
757 /* set least significant non-zero bit to zero */
761 dev->data->nb_tx_queues = (uint16_t)num_sub;
764 RTE_LOG(INFO, PMD, "Successfully opened tx channels: %u\n",
/*
 * Close every open szedata2 RX handle and reset the rx queue counts
 * on both internals and dev->data.
 */
771 close_rx_channels(struct rte_eth_dev *dev)
773 struct pmd_internals *internals = dev->data->dev_private;
775 uint32_t num_sub = internals->nb_rx_queues;
777 for (i = 0; i < num_sub; i++) {
778 if (internals->rx_queue[i].sze != NULL) {
779 szedata_close(internals->rx_queue[i].sze);
780 internals->rx_queue[i].sze = NULL;
783 /* set number of rx queues to zero */
784 internals->nb_rx_queues = 0;
785 dev->data->nb_rx_queues = (uint16_t)0;
/*
 * Close every open szedata2 TX handle and reset the tx queue counts
 * on both internals and dev->data.
 */
789 close_tx_channels(struct rte_eth_dev *dev)
791 struct pmd_internals *internals = dev->data->dev_private;
793 uint32_t num_sub = internals->nb_tx_queues;
795 for (i = 0; i < num_sub; i++) {
796 if (internals->tx_queue[i].sze != NULL) {
797 szedata_close(internals->tx_queue[i].sze);
798 internals->tx_queue[i].sze = NULL;
801 /* set number of tx queues to zero */
802 internals->nb_tx_queues = 0;
803 dev->data->nb_tx_queues = (uint16_t)0;
/*
 * dev_start callback: (re)initialize rx and tx channels if none are open,
 * rolling back already-opened channels on failure, then mark the link up.
 */
807 eth_dev_start(struct rte_eth_dev *dev)
809 struct pmd_internals *internals = dev->data->dev_private;
812 if (internals->nb_rx_queues == 0) {
813 ret = init_rx_channels(dev, 0);
815 close_rx_channels(dev);
820 if (internals->nb_tx_queues == 0) {
821 ret = init_tx_channels(dev, 0);
823 close_tx_channels(dev);
824 close_rx_channels(dev);
829 dev->data->dev_link.link_status = 1;
/*
 * dev_stop callback: close all open rx/tx szedata2 handles, zero the
 * queue counts and mark the link down.
 */
834 eth_dev_stop(struct rte_eth_dev *dev)
837 struct pmd_internals *internals = dev->data->dev_private;
839 for (i = 0; i < internals->nb_rx_queues; i++) {
840 if (internals->rx_queue[i].sze != NULL) {
841 szedata_close(internals->rx_queue[i].sze);
842 internals->rx_queue[i].sze = NULL;
846 for (i = 0; i < internals->nb_tx_queues; i++) {
847 if (internals->tx_queue[i].sze != NULL) {
848 szedata_close(internals->tx_queue[i].sze);
849 internals->tx_queue[i].sze = NULL;
853 internals->nb_rx_queues = 0;
854 internals->nb_tx_queues = 0;
856 dev->data->nb_rx_queues = (uint16_t)0;
857 dev->data->nb_tx_queues = (uint16_t)0;
859 dev->data->dev_link.link_status = 0;
/* dev_configure callback: nothing to configure for this PMD. */
863 eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
/*
 * dev_infos_get callback: report static device capabilities and the
 * currently opened queue counts.
 */
869 eth_dev_info(struct rte_eth_dev *dev,
870 struct rte_eth_dev_info *dev_info)
872 struct pmd_internals *internals = dev->data->dev_private;
873 dev_info->driver_name = drivername;
874 dev_info->if_index = internals->if_index;
875 dev_info->max_mac_addrs = 1;
876 dev_info->max_rx_pktlen = (uint32_t)-1; /* no RX length limit */
877 dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
878 dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
879 dev_info->min_rx_bufsize = 0;
880 dev_info->pci_dev = NULL; /* virtual device, no PCI backing */
/*
 * stats_get callback: aggregate per-queue counters into the device-wide
 * totals; per-queue stats are exported only up to
 * RTE_ETHDEV_QUEUE_STAT_CNTRS queues.
 */
884 eth_stats_get(struct rte_eth_dev *dev,
885 struct rte_eth_stats *stats)
888 uint64_t rx_total = 0;
889 uint64_t tx_total = 0;
890 uint64_t tx_err_total = 0;
891 uint64_t rx_total_bytes = 0;
892 uint64_t tx_total_bytes = 0;
893 const struct pmd_internals *internal = dev->data->dev_private;
895 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
896 i < internal->nb_rx_queues; i++) {
897 stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
898 stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
899 rx_total += stats->q_ipackets[i];
900 rx_total_bytes += stats->q_ibytes[i];
903 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
904 i < internal->nb_tx_queues; i++) {
905 stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
906 stats->q_errors[i] = internal->tx_queue[i].err_pkts;
907 stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
908 tx_total += stats->q_opackets[i];
909 tx_err_total += stats->q_errors[i];
910 tx_total_bytes += stats->q_obytes[i];
913 stats->ipackets = rx_total;
914 stats->opackets = tx_total;
915 stats->ibytes = rx_total_bytes;
916 stats->obytes = tx_total_bytes;
917 stats->oerrors = tx_err_total;
/* stats_reset callback: zero all per-queue rx/tx counters. */
921 eth_stats_reset(struct rte_eth_dev *dev)
924 struct pmd_internals *internal = dev->data->dev_private;
925 for (i = 0; i < internal->nb_rx_queues; i++) {
926 internal->rx_queue[i].rx_pkts = 0;
927 internal->rx_queue[i].rx_bytes = 0;
929 for (i = 0; i < internal->nb_tx_queues; i++) {
930 internal->tx_queue[i].tx_pkts = 0;
931 internal->tx_queue[i].err_pkts = 0;
932 internal->tx_queue[i].tx_bytes = 0;
/*
 * dev_close callback: close all rx/tx szedata2 handles and zero the
 * queue counts (same teardown as eth_dev_stop, without touching link
 * status).
 */
937 eth_dev_close(struct rte_eth_dev *dev)
940 struct pmd_internals *internals = dev->data->dev_private;
942 for (i = 0; i < internals->nb_rx_queues; i++) {
943 if (internals->rx_queue[i].sze != NULL) {
944 szedata_close(internals->rx_queue[i].sze);
945 internals->rx_queue[i].sze = NULL;
949 for (i = 0; i < internals->nb_tx_queues; i++) {
950 if (internals->tx_queue[i].sze != NULL) {
951 szedata_close(internals->tx_queue[i].sze);
952 internals->tx_queue[i].sze = NULL;
956 internals->nb_rx_queues = 0;
957 internals->nb_tx_queues = 0;
959 dev->data->nb_rx_queues = (uint16_t)0;
960 dev->data->nb_tx_queues = (uint16_t)0;
/* queue_release callback: queues live inside pmd_internals, nothing to free. */
964 eth_queue_release(void *q __rte_unused)
/* link_update callback: link state is static for this PMD, nothing to poll. */
969 eth_link_update(struct rte_eth_dev *dev __rte_unused,
970 int wait_to_complete __rte_unused)
/*
 * rx_queue_setup callback: bind the mbuf pool and port id to the
 * pre-allocated queue slot and expose it via dev->data->rx_queues.
 * Descriptor count, socket and rxconf are ignored by this PMD.
 */
976 eth_rx_queue_setup(struct rte_eth_dev *dev,
977 uint16_t rx_queue_id,
978 uint16_t nb_rx_desc __rte_unused,
979 unsigned int socket_id __rte_unused,
980 const struct rte_eth_rxconf *rx_conf __rte_unused,
981 struct rte_mempool *mb_pool)
983 struct pmd_internals *internals = dev->data->dev_private;
984 struct szedata2_rx_queue *szedata2_q =
985 &internals->rx_queue[rx_queue_id];
986 szedata2_q->mb_pool = mb_pool;
987 dev->data->rx_queues[rx_queue_id] = szedata2_q;
988 szedata2_q->in_port = dev->data->port_id;
/*
 * tx_queue_setup callback: expose the pre-allocated tx queue slot via
 * dev->data->tx_queues. Descriptor count, socket and txconf are ignored.
 */
993 eth_tx_queue_setup(struct rte_eth_dev *dev,
994 uint16_t tx_queue_id,
995 uint16_t nb_tx_desc __rte_unused,
996 unsigned int socket_id __rte_unused,
997 const struct rte_eth_txconf *tx_conf __rte_unused)
999 struct pmd_internals *internals = dev->data->dev_private;
1000 dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];
/* mac_addr_set callback: MAC address is fixed for this PMD, no-op. */
1005 eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
1006 struct ether_addr *mac_addr __rte_unused)
/* ethdev callback table wired into every szedata2 port. */
1010 static struct eth_dev_ops ops = {
1011 .dev_start = eth_dev_start,
1012 .dev_stop = eth_dev_stop,
1013 .dev_close = eth_dev_close,
1014 .dev_configure = eth_dev_configure,
1015 .dev_infos_get = eth_dev_info,
1016 .rx_queue_setup = eth_rx_queue_setup,
1017 .tx_queue_setup = eth_tx_queue_setup,
1018 .rx_queue_release = eth_queue_release,
1019 .tx_queue_release = eth_queue_release,
1020 .link_update = eth_link_update,
1021 .stats_get = eth_stats_get,
1022 .stats_reset = eth_stats_reset,
1023 .mac_addr_set = eth_mac_addr_set,
/*
 * Parse a numeric channel mask string (any base accepted by strtol)
 * into *mask_num; rejects trailing garbage and out-of-range values.
 * NOTE(review): the "value > UINT32_MAX" range check is ineffective on
 * platforms where long is 32 bits, and errno/ERANGE from strtol is not
 * checked — verify against the full file and target platforms.
 */
1027 parse_mask(const char *mask_str, uint32_t *mask_num)
1032 value = strtol(mask_str, &endptr, 0);
1033 if (*endptr != '\0' || value > UINT32_MAX || value < 0)
1036 *mask_num = (uint32_t)value;
/*
 * rte_kvargs_process handler for "rx_ifaces": parse one mask value and
 * OR it into the accumulated rx channel request.
 */
1041 add_rx_mask(const char *key __rte_unused, const char *value, void *extra_args)
1043 struct rxtx_szedata2 *szedata2 = extra_args;
1046 if (parse_mask(value, &mask) != 0)
1049 szedata2->sze_rx_mask_req |= mask;
/*
 * rte_kvargs_process handler for "tx_ifaces": parse one mask value and
 * OR it into the accumulated tx channel request.
 */
1054 add_tx_mask(const char *key __rte_unused, const char *value, void *extra_args)
1056 struct rxtx_szedata2 *szedata2 = extra_args;
1059 if (parse_mask(value, &mask) != 0)
1062 szedata2->sze_tx_mask_req |= mask;
/*
 * Allocate and wire up the ethdev for a new szedata2 port: per-process
 * rte_eth_dev_data, the pmd_internals private area, and an ethdev slot.
 * On success *internals and *eth_dev point at the new objects; cleanup
 * paths free the partial allocations (tail of the function not visible
 * in this excerpt).
 */
1067 rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
1068 const unsigned nb_tx_queues,
1069 const unsigned numa_node,
1070 struct pmd_internals **internals,
1071 struct rte_eth_dev **eth_dev)
1073 struct rte_eth_dev_data *data = NULL;
1076 "Creating szedata2-backed ethdev on numa socket %u\n",
1080 * now do all data allocation - for eth_dev structure
1081 * and internal (private) data
1083 data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
1087 *internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
1089 if (*internals == NULL)
1092 /* reserve an ethdev entry */
1093 *eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
1094 if (*eth_dev == NULL)
1098 * now put it all together
1099 * - store queue data in internals,
1100 * - store numa_node info in pci_driver
1101 * - point eth_dev_data to internals
1102 * - and point eth_dev structure to new eth_dev_data structure
1104 * NOTE: we'll replace the data element, of originally allocated eth_dev
1105 * so the rings are local per-process
1108 (*internals)->nb_rx_queues = nb_rx_queues;
1109 (*internals)->nb_tx_queues = nb_tx_queues;
1111 (*internals)->if_index = 0;
1113 data->dev_private = *internals;
1114 data->port_id = (*eth_dev)->data->port_id;
1115 snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
1116 data->nb_rx_queues = (uint16_t)nb_rx_queues;
1117 data->nb_tx_queues = (uint16_t)nb_tx_queues;
1118 data->dev_link = pmd_link;
1119 data->mac_addrs = &eth_addr;
1121 (*eth_dev)->data = data;
1122 (*eth_dev)->dev_ops = &ops;
1123 (*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
1124 (*eth_dev)->driver = NULL;
1125 (*eth_dev)->data->kdrv = RTE_KDRV_NONE;
1126 (*eth_dev)->data->drv_name = drivername;
1127 (*eth_dev)->data->numa_node = numa_node;
/* error path: free the private area allocated above */
1133 rte_free(*internals);
/*
 * Create a fully initialized szedata2 ethdev: allocate the ethdev via
 * rte_pmd_init_internals(), copy the parsed device path and channel
 * requests into internals, open the rx/tx channels (rolling back on
 * failure) and install the burst callbacks.
 */
1138 rte_eth_from_szedata2(const char *name,
1139 struct rxtx_szedata2 *szedata2,
1140 const unsigned numa_node)
1142 struct pmd_internals *internals = NULL;
1143 struct rte_eth_dev *eth_dev = NULL;
1146 if (rte_pmd_init_internals(name, 0, 0, numa_node,
1147 &internals, &eth_dev) < 0)
1150 internals->sze_dev = szedata2->sze_dev;
1151 internals->sze_rx_req = szedata2->sze_rx_mask_req;
1152 internals->sze_tx_req = szedata2->sze_tx_mask_req;
1153 internals->num_of_rx = szedata2->num_of_rx;
1154 internals->num_of_tx = szedata2->num_of_tx;
1156 RTE_LOG(INFO, PMD, "Number of rx channels to open: %u mask: 0x%x\n",
1157 internals->num_of_rx, internals->sze_rx_req);
1158 RTE_LOG(INFO, PMD, "Number of tx channels to open: %u mask: 0x%x\n",
1159 internals->num_of_tx, internals->sze_tx_req);
1161 ret = init_rx_channels(eth_dev, 1);
1163 close_rx_channels(eth_dev);
1167 ret = init_tx_channels(eth_dev, 1);
1169 close_tx_channels(eth_dev);
1170 close_rx_channels(eth_dev);
/* install the datapath entry points */
1174 eth_dev->rx_pkt_burst = eth_szedata2_rx;
1175 eth_dev->tx_pkt_burst = eth_szedata2_tx;
/*
 * Driver init entry point: parse the device arguments
 * (dev_path, rx_ifaces, tx_ifaces) with rte_kvargs, accumulate the
 * channel masks, derive channel counts and create the ethdev.
 */
1182 rte_pmd_szedata2_devinit(const char *name, const char *params)
1186 struct rte_kvargs *kvlist;
1188 struct rte_kvargs_pair *pair = NULL;
1189 struct rxtx_szedata2 szedata2 = { 0, 0, 0, 0, NULL };
1190 bool dev_path_missing = true;
1192 RTE_LOG(INFO, PMD, "Initializing pmd_szedata2 for %s\n", name);
1194 numa_node = rte_socket_id();
1196 kvlist = rte_kvargs_parse(params, valid_arguments);
1201 * Get szedata2 device path and rx,tx channels from passed arguments.
/* exactly one dev_path, at least one rx and one tx mask required */
1204 if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_DEV_PATH_ARG) != 1)
1207 if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_RX_IFACES_ARG) < 1)
1210 if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_TX_IFACES_ARG) < 1)
1213 for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
1214 pair = &kvlist->pairs[k_idx];
1215 if (strstr(pair->key, RTE_ETH_SZEDATA2_DEV_PATH_ARG) != NULL) {
1216 szedata2.sze_dev = pair->value;
1217 dev_path_missing = false;
1222 if (dev_path_missing)
1225 ret = rte_kvargs_process(kvlist, RTE_ETH_SZEDATA2_RX_IFACES_ARG,
1226 &add_rx_mask, &szedata2);
1230 ret = rte_kvargs_process(kvlist, RTE_ETH_SZEDATA2_TX_IFACES_ARG,
1231 &add_tx_mask, &szedata2);
/* one channel per bit set in each mask */
1235 szedata2.num_of_rx = count_ones(szedata2.sze_rx_mask_req);
1236 szedata2.num_of_tx = count_ones(szedata2.sze_tx_mask_req);
1238 RTE_LOG(INFO, PMD, "SZE device found at path %s\n", szedata2.sze_dev);
1240 return rte_eth_from_szedata2(name, &szedata2, numa_node);
1242 rte_kvargs_free(kvlist);
/*
 * Driver uninit entry point: find the ethdev by name, free its private
 * data and per-process data area, and release the ethdev port slot.
 */
1247 rte_pmd_szedata2_devuninit(const char *name)
1249 struct rte_eth_dev *dev = NULL;
1251 RTE_LOG(INFO, PMD, "Uninitializing pmd_szedata2 for %s "
1252 "on numa socket %u\n", name, rte_socket_id());
1257 dev = rte_eth_dev_allocated(name);
1261 rte_free(dev->data->dev_private);
1262 rte_free(dev->data);
1263 rte_eth_dev_release_port(dev);
/* Virtual-device driver registration: binds "eth_szedata2" to init/uninit. */
1267 static struct rte_driver pmd_szedata2_drv = {
1268 .name = "eth_szedata2",
1270 .init = rte_pmd_szedata2_devinit,
1271 .uninit = rte_pmd_szedata2_devuninit,
1274 PMD_REGISTER_DRIVER(pmd_szedata2_drv);