/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2015 CESNET
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of CESNET nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <err.h>

#include <libsze2.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_kvargs.h>
#include <rte_dev.h>

#include "rte_eth_szedata2.h"
#define RTE_ETH_SZEDATA2_DEV_PATH_ARG "dev_path"
#define RTE_ETH_SZEDATA2_RX_IFACES_ARG "rx_ifaces"
#define RTE_ETH_SZEDATA2_TX_IFACES_ARG "tx_ifaces"

#define RTE_ETH_SZEDATA2_MAX_RX_QUEUES 32
#define RTE_ETH_SZEDATA2_MAX_TX_QUEUES 32
#define RTE_ETH_SZEDATA2_TX_LOCK_SIZE (32 * 1024 * 1024)
/*
 * size of szedata2_packet header with alignment
 */
#define RTE_SZE2_PACKET_HEADER_SIZE_ALIGNED 8
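
/*
 * Note (added, illustrative): RTE_SZE2_ALIGN8() is assumed here to be the
 * usual round-up-to-a-multiple-of-8 macro from rte_eth_szedata2.h, e.g.
 * RTE_SZE2_ALIGN8(13) == 16; something along the lines of
 *
 *	#define RTE_SZE2_ALIGN8(x) (((x) + 7) & ~7U)
 *
 * The rx burst routine below relies on every segment in the szedata2 ring
 * starting on such an 8-byte boundary.
 */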
struct szedata2_rx_queue {
	struct szedata *sze;
	uint8_t rx_channel;
	uint8_t in_port;
	struct rte_mempool *mb_pool;
	volatile uint64_t rx_pkts;
	volatile uint64_t rx_bytes;
	volatile uint64_t err_pkts;
};

struct szedata2_tx_queue {
	struct szedata *sze;
	uint8_t tx_channel;
	volatile uint64_t tx_pkts;
	volatile uint64_t err_pkts;
	volatile uint64_t tx_bytes;
};

struct rxtx_szedata2 {
	uint32_t num_of_rx;
	uint32_t num_of_tx;
	uint32_t sze_rx_mask_req;
	uint32_t sze_tx_mask_req;
	char *sze_dev;
};

struct pmd_internals {
	struct szedata2_rx_queue rx_queue[RTE_ETH_SZEDATA2_MAX_RX_QUEUES];
	struct szedata2_tx_queue tx_queue[RTE_ETH_SZEDATA2_MAX_TX_QUEUES];
	unsigned nb_rx_queues;
	unsigned nb_tx_queues;
	uint32_t num_of_rx;
	uint32_t num_of_tx;
	uint32_t sze_rx_req;
	uint32_t sze_tx_req;
	int if_index;
	char *sze_dev;
};
static const char *valid_arguments[] = {
	RTE_ETH_SZEDATA2_DEV_PATH_ARG,
	RTE_ETH_SZEDATA2_RX_IFACES_ARG,
	RTE_ETH_SZEDATA2_TX_IFACES_ARG,
	NULL
};

static struct ether_addr eth_addr = {
	.addr_bytes = { 0x00, 0x11, 0x17, 0x00, 0x00, 0x00 }
};
static const char *drivername = "SZEdata2 PMD";
static struct rte_eth_link pmd_link = {
	.link_speed = ETH_LINK_SPEED_10G,
	.link_duplex = ETH_LINK_FULL_DUPLEX,
	.link_status = 0
};
static uint32_t
count_ones(uint32_t num)
{
	num = num - ((num >> 1) & 0x55555555); /* reuse input as temporary */
	num = (num & 0x33333333) + ((num >> 2) & 0x33333333);        /* temp */
	return (((num + (num >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; /* count */
}
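
/*
 * Note (added): count_ones() is the classic SWAR population count,
 * equivalent to GCC's __builtin_popcount(); e.g. count_ones(0xF0) == 4.
 * It is used below both to count the channels requested in a mask and
 * to turn an "all lower bits set" value into a channel index.
 */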
static uint16_t
eth_szedata2_rx(void *queue,
		struct rte_mbuf **bufs,
		uint16_t nb_pkts)
{
	unsigned int i;
	struct rte_mbuf *mbuf;
	struct szedata2_rx_queue *sze_q = queue;
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint16_t num_rx = 0;
	uint16_t buf_size;
	uint16_t sg_size;
	uint16_t hw_size;
	uint16_t packet_size;
	uint64_t num_bytes = 0;
	struct szedata *sze = sze_q->sze;
	uint8_t *header_ptr = NULL; /* header of packet */
	uint8_t *packet_ptr1 = NULL;
	uint8_t *packet_ptr2 = NULL;
	uint16_t packet_len1 = 0;
	uint16_t packet_len2 = 0;
	uint16_t hw_data_align;

	if (unlikely(sze_q->sze == NULL || nb_pkts == 0))
		return 0;
	/*
	 * Reads the given number of packets from the szedata2 channel given
	 * by queue and copies the packet data into a newly allocated mbuf.
	 */
	for (i = 0; i < nb_pkts; i++) {
		mbuf = rte_pktmbuf_alloc(sze_q->mb_pool);

		if (unlikely(mbuf == NULL))
			break;

		/* get the next sze packet */
		if (sze->ct_rx_lck != NULL && !sze->ct_rx_rem_bytes &&
				sze->ct_rx_lck->next == NULL) {
			/* unlock old data */
			szedata_rx_unlock_data(sze_q->sze, sze->ct_rx_lck_orig);
			sze->ct_rx_lck_orig = NULL;
			sze->ct_rx_lck = NULL;
		}

		if (!sze->ct_rx_rem_bytes && sze->ct_rx_lck_orig == NULL) {
			/* nothing to read, lock new data */
			sze->ct_rx_lck = szedata_rx_lock_data(sze_q->sze, ~0U);
			sze->ct_rx_lck_orig = sze->ct_rx_lck;

			if (sze->ct_rx_lck == NULL) {
				/* nothing to lock */
				rte_pktmbuf_free(mbuf);
				break;
			}

			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len;

			if (!sze->ct_rx_rem_bytes) {
				rte_pktmbuf_free(mbuf);
				break;
			}
		}

		if (sze->ct_rx_rem_bytes < RTE_SZE2_PACKET_HEADER_SIZE) {
			/*
			 * cut in header:
			 * copy parts of header to merge buffer
			 */
			if (sze->ct_rx_lck->next == NULL) {
				rte_pktmbuf_free(mbuf);
				break;
			}

			/* copy first part of header */
			rte_memcpy(sze->ct_rx_buffer, sze->ct_rx_cur_ptr,
					sze->ct_rx_rem_bytes);

			/* copy second part of header */
			sze->ct_rx_lck = sze->ct_rx_lck->next;
			sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;
			rte_memcpy(sze->ct_rx_buffer + sze->ct_rx_rem_bytes,
					sze->ct_rx_cur_ptr,
					RTE_SZE2_PACKET_HEADER_SIZE -
					sze->ct_rx_rem_bytes);

			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE -
				sze->ct_rx_rem_bytes;
			sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
				RTE_SZE2_PACKET_HEADER_SIZE +
				sze->ct_rx_rem_bytes;

			header_ptr = (uint8_t *)sze->ct_rx_buffer;
		} else {
			/* not cut */
			header_ptr = (uint8_t *)sze->ct_rx_cur_ptr;
			sze->ct_rx_cur_ptr += RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_PACKET_HEADER_SIZE;
		}

		/*
		 * the segment header starts with two little-endian 16-bit
		 * words: the whole segment size and the hardware data size
		 */
		sg_size = le16toh(*((uint16_t *)header_ptr));
		hw_size = le16toh(*(((uint16_t *)header_ptr) + 1));
		packet_size = sg_size -
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size);
		/* sanity-check the segment */
		if (!sg_size)
			errx(5, "Zero segsize");

		/* check sg_size and hw_size */
		if (hw_size > sg_size - RTE_SZE2_PACKET_HEADER_SIZE) {
			errx(10, "Hwsize bigger than expected. Segsize: %d, "
					"hwsize: %d", sg_size, hw_size);
		}

		hw_data_align =
			RTE_SZE2_ALIGN8(RTE_SZE2_PACKET_HEADER_SIZE + hw_size) -
			RTE_SZE2_PACKET_HEADER_SIZE;

		if (sze->ct_rx_rem_bytes >=
				(uint16_t)(sg_size -
				RTE_SZE2_PACKET_HEADER_SIZE)) {
			/* no fragmentation of packet */
			/* one packet ready - go to another */
			packet_ptr1 = sze->ct_rx_cur_ptr + hw_data_align;
			packet_len1 = packet_size;
			packet_ptr2 = NULL;
			packet_len2 = 0;

			sze->ct_rx_cur_ptr += RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
			sze->ct_rx_rem_bytes -= RTE_SZE2_ALIGN8(sg_size) -
				RTE_SZE2_PACKET_HEADER_SIZE;
		} else {
			/* fragmented packet */
			if (sze->ct_rx_lck->next == NULL) {
				errx(6, "Need \"next\" lock, "
						"but it is missing: %u",
						sze->ct_rx_rem_bytes);
			}

			/* check if hw data are in the current lock */
			if (sze->ct_rx_rem_bytes <= hw_data_align) {
				uint16_t rem_size = hw_data_align -
					sze->ct_rx_rem_bytes;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr =
					(void *)(((uint8_t *)
					(sze->ct_rx_lck->start)) + rem_size);

				packet_ptr1 = sze->ct_rx_cur_ptr;
				packet_len1 = packet_size;
				packet_ptr2 = NULL;
				packet_len2 = 0;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size);
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					rem_size - RTE_SZE2_ALIGN8(packet_size);
			} else {
				/* get pointer and length from first part */
				packet_ptr1 = sze->ct_rx_cur_ptr +
					hw_data_align;
				packet_len1 = sze->ct_rx_rem_bytes -
					hw_data_align;

				/* MOVE to next lock */
				sze->ct_rx_lck = sze->ct_rx_lck->next;
				sze->ct_rx_cur_ptr = sze->ct_rx_lck->start;

				/* get pointer and length from second part */
				packet_ptr2 = sze->ct_rx_cur_ptr;
				packet_len2 = packet_size - packet_len1;

				sze->ct_rx_cur_ptr +=
					RTE_SZE2_ALIGN8(packet_size) -
					packet_len1;
				sze->ct_rx_rem_bytes = sze->ct_rx_lck->len -
					(RTE_SZE2_ALIGN8(packet_size) -
					packet_len1);
			}
		}
		if (unlikely(packet_ptr1 == NULL)) {
			rte_pktmbuf_free(mbuf);
			break;
		}

		/* get the space available for data in the mbuf */
		mbp_priv = rte_mempool_get_priv(sze_q->mb_pool);
		buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
				RTE_PKTMBUF_HEADROOM);

		if (packet_size <= buf_size) {
			/* sze packet will fit in one mbuf, go ahead and copy */
			rte_memcpy(rte_pktmbuf_mtod(mbuf, void *),
					packet_ptr1, packet_len1);
			if (packet_ptr2 != NULL) {
				rte_memcpy((void *)(rte_pktmbuf_mtod(mbuf,
						uint8_t *) + packet_len1),
						packet_ptr2, packet_len2);
			}
			mbuf->data_len = (uint16_t)packet_size;
			mbuf->pkt_len = packet_size;
			mbuf->port = sze_q->in_port;
			bufs[num_rx] = mbuf;
			num_rx++;
			num_bytes += packet_size;
		} else {
			/*
			 * sze packet will not fit in one mbuf,
			 * scattered mode is not enabled, drop packet
			 */
			RTE_LOG(ERR, PMD,
				"SZE segment %d bytes will not fit in one mbuf "
				"(%d bytes), scattered mode is not enabled, "
				"drop packet!!\n",
				packet_size, buf_size);
			rte_pktmbuf_free(mbuf);
		}
	}

	sze_q->rx_pkts += num_rx;
	sze_q->rx_bytes += num_bytes;

	return num_rx;
}
static int
init_rx_channels(struct rte_eth_dev *dev, int v)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;
	uint32_t i;
	uint32_t count = internals->num_of_rx;
	uint32_t num_sub = 0;
	uint32_t x;
	uint32_t rx;
	uint32_t tx;

	rx = internals->sze_rx_req;
	tx = 0;

	for (i = 0; i < count; i++) {
		/*
		 * Open, subscribe rx,tx channels and start device
		 */
		if (v)
			RTE_LOG(INFO, PMD, "Opening SZE device no. %u\n", i);

		internals->rx_queue[num_sub].sze =
			szedata_open(internals->sze_dev);
		if (internals->rx_queue[num_sub].sze == NULL)
			return -1;

		/* separate least significant non-zero bit */
		x = rx & ((~rx) + 1);

		if (v)
			RTE_LOG(INFO, PMD, "Subscribing rx channel: 0x%x "
					"tx channel: 0x%x\n", x, tx);

		ret = szedata_subscribe3(internals->rx_queue[num_sub].sze,
				&x, &tx);
		if (ret) {
			szedata_close(internals->rx_queue[num_sub].sze);
			internals->rx_queue[num_sub].sze = NULL;
			return -1;
		}

		if (v)
			RTE_LOG(INFO, PMD, "Subscribed rx channel: 0x%x "
					"tx channel: 0x%x\n", x, tx);

		if (x) {
			if (v)
				RTE_LOG(INFO, PMD, "Starting SZE device for "
						"rx queue: %u\n", num_sub);

			ret = szedata_start(internals->rx_queue[num_sub].sze);
			if (ret) {
				szedata_close(internals->rx_queue[num_sub].sze);
				internals->rx_queue[num_sub].sze = NULL;
				return -1;
			}

			/*
			 * set to 1 all bits lower than bit set to 1
			 * and that bit to 0
			 */
			x -= 1;
			internals->rx_queue[num_sub].rx_channel =
				count_ones(x);

			if (v)
				RTE_LOG(INFO, PMD, "Subscribed rx channel "
					"no: %u\n",
					internals->rx_queue[num_sub].rx_channel);

			num_sub++;
			internals->nb_rx_queues = num_sub;
		} else {
			RTE_LOG(ERR, PMD,
					"Could not subscribe any rx channel. "
					"Closing SZE device\n");

			szedata_close(internals->rx_queue[num_sub].sze);
			internals->rx_queue[num_sub].sze = NULL;
			return -1;
		}

		/* set least significant non-zero bit to zero */
		rx = rx & (rx - 1);
	}

	dev->data->nb_rx_queues = (uint16_t)num_sub;

	if (v)
		RTE_LOG(INFO, PMD, "Successfully opened rx channels: %u\n",
				num_sub);

	return 0;
}
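
/*
 * Worked example (added, illustrative; it assumes szedata_subscribe3()
 * grants exactly the requested bit): with sze_rx_req = 0x5 (binary 101)
 * the loop above runs twice:
 *
 *   pass 1: x = 0x5 & (~0x5 + 1) = 0x1; x - 1 = 0x0, count_ones(0x0) = 0,
 *           so queue 0 maps to hardware channel 0; then rx = 0x5 & 0x4.
 *   pass 2: x = 0x4 & (~0x4 + 1) = 0x4; x - 1 = 0x3, count_ones(0x3) = 2,
 *           so queue 1 maps to hardware channel 2; then rx becomes 0.
 */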
static int
init_tx_channels(struct rte_eth_dev *dev, int v)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;
	uint32_t i;
	uint32_t count = internals->num_of_tx;
	uint32_t num_sub = 0;
	uint32_t x;
	uint32_t rx;
	uint32_t tx;

	rx = 0;
	tx = internals->sze_tx_req;

	for (i = 0; i < count; i++) {
		/*
		 * Open, subscribe rx,tx channels and start device
		 */
		if (v)
			RTE_LOG(INFO, PMD, "Opening SZE device no. %u\n",
					i + internals->num_of_rx);

		internals->tx_queue[num_sub].sze =
			szedata_open(internals->sze_dev);
		if (internals->tx_queue[num_sub].sze == NULL)
			return -1;

		/* separate least significant non-zero bit */
		x = tx & ((~tx) + 1);

		if (v)
			RTE_LOG(INFO, PMD, "Subscribing rx channel: 0x%x "
					"tx channel: 0x%x\n", rx, x);

		ret = szedata_subscribe3(internals->tx_queue[num_sub].sze,
				&rx, &x);
		if (ret) {
			szedata_close(internals->tx_queue[num_sub].sze);
			internals->tx_queue[num_sub].sze = NULL;
			return -1;
		}

		if (v)
			RTE_LOG(INFO, PMD, "Subscribed rx channel: 0x%x "
					"tx channel: 0x%x\n", rx, x);

		if (x) {
			if (v)
				RTE_LOG(INFO, PMD, "Starting SZE device for "
						"tx queue: %u\n", num_sub);

			ret = szedata_start(internals->tx_queue[num_sub].sze);
			if (ret) {
				szedata_close(internals->tx_queue[num_sub].sze);
				internals->tx_queue[num_sub].sze = NULL;
				return -1;
			}

			/*
			 * set to 1 all bits lower than bit set to 1
			 * and that bit to 0
			 */
			x -= 1;
			internals->tx_queue[num_sub].tx_channel =
				count_ones(x);

			if (v)
				RTE_LOG(INFO, PMD, "Subscribed tx channel "
					"no: %u\n",
					internals->tx_queue[num_sub].tx_channel);

			num_sub++;
			internals->nb_tx_queues = num_sub;
		} else {
			RTE_LOG(ERR, PMD,
					"Could not subscribe any tx channel. "
					"Closing SZE device\n");

			szedata_close(internals->tx_queue[num_sub].sze);
			internals->tx_queue[num_sub].sze = NULL;
			return -1;
		}

		/* set least significant non-zero bit to zero */
		tx = tx & (tx - 1);
	}

	dev->data->nb_tx_queues = (uint16_t)num_sub;

	if (v)
		RTE_LOG(INFO, PMD, "Successfully opened tx channels: %u\n",
				num_sub);

	return 0;
}
static void
close_rx_channels(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	uint32_t i;
	uint32_t num_sub = internals->nb_rx_queues;

	for (i = 0; i < num_sub; i++) {
		if (internals->rx_queue[i].sze != NULL) {
			szedata_close(internals->rx_queue[i].sze);
			internals->rx_queue[i].sze = NULL;
		}
	}
	/* set number of rx queues to zero */
	internals->nb_rx_queues = 0;
	dev->data->nb_rx_queues = (uint16_t)0;
}
static void
close_tx_channels(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	uint32_t i;
	uint32_t num_sub = internals->nb_tx_queues;

	for (i = 0; i < num_sub; i++) {
		if (internals->tx_queue[i].sze != NULL) {
			szedata_close(internals->tx_queue[i].sze);
			internals->tx_queue[i].sze = NULL;
		}
	}
	/* set number of tx queues to zero */
	internals->nb_tx_queues = 0;
	dev->data->nb_tx_queues = (uint16_t)0;
}
static int
eth_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *internals = dev->data->dev_private;
	int ret;

	if (internals->nb_rx_queues == 0) {
		ret = init_rx_channels(dev, 0);
		if (ret != 0) {
			close_rx_channels(dev);
			return -1;
		}
	}

	if (internals->nb_tx_queues == 0) {
		ret = init_tx_channels(dev, 0);
		if (ret != 0) {
			close_tx_channels(dev);
			close_rx_channels(dev);
			return -1;
		}
	}

	dev->data->dev_link.link_status = 1;
	return 0;
}
static void
eth_dev_stop(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_rx_queues; i++) {
		if (internals->rx_queue[i].sze != NULL) {
			szedata_close(internals->rx_queue[i].sze);
			internals->rx_queue[i].sze = NULL;
		}
	}

	for (i = 0; i < internals->nb_tx_queues; i++) {
		if (internals->tx_queue[i].sze != NULL) {
			szedata_close(internals->tx_queue[i].sze);
			internals->tx_queue[i].sze = NULL;
		}
	}

	internals->nb_rx_queues = 0;
	internals->nb_tx_queues = 0;

	dev->data->nb_rx_queues = (uint16_t)0;
	dev->data->nb_tx_queues = (uint16_t)0;

	dev->data->dev_link.link_status = 0;
}
static int
eth_dev_configure(struct rte_eth_dev *dev __rte_unused)
{
	return 0;
}
static void
eth_dev_info(struct rte_eth_dev *dev,
		struct rte_eth_dev_info *dev_info)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev_info->driver_name = drivername;
	dev_info->if_index = internals->if_index;
	dev_info->max_mac_addrs = 1;
	dev_info->max_rx_pktlen = (uint32_t)-1;
	dev_info->max_rx_queues = (uint16_t)internals->nb_rx_queues;
	dev_info->max_tx_queues = (uint16_t)internals->nb_tx_queues;
	dev_info->min_rx_bufsize = 0;
	dev_info->pci_dev = NULL;
}
static void
eth_stats_get(struct rte_eth_dev *dev,
		struct rte_eth_stats *stats)
{
	unsigned i;
	uint64_t rx_total = 0;
	uint64_t tx_total = 0;
	uint64_t tx_err_total = 0;
	uint64_t rx_total_bytes = 0;
	uint64_t tx_total_bytes = 0;
	const struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < internal->nb_rx_queues; i++) {
		stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts;
		stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes;
		rx_total += stats->q_ipackets[i];
		rx_total_bytes += stats->q_ibytes[i];
	}

	for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS &&
			i < internal->nb_tx_queues; i++) {
		stats->q_opackets[i] = internal->tx_queue[i].tx_pkts;
		stats->q_errors[i] = internal->tx_queue[i].err_pkts;
		stats->q_obytes[i] = internal->tx_queue[i].tx_bytes;
		tx_total += stats->q_opackets[i];
		tx_err_total += stats->q_errors[i];
		tx_total_bytes += stats->q_obytes[i];
	}

	stats->ipackets = rx_total;
	stats->opackets = tx_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;
	stats->oerrors = tx_err_total;
}
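
/*
 * Note (added): the per-queue arrays in struct rte_eth_stats hold only
 * RTE_ETHDEV_QUEUE_STAT_CNTRS entries (16 in the default DPDK config),
 * while this PMD allows up to 32 queues. Both loops above stop at that
 * limit, so higher-numbered queues are left out of the totals as well.
 */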
static void
eth_stats_reset(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internal = dev->data->dev_private;

	for (i = 0; i < internal->nb_rx_queues; i++) {
		internal->rx_queue[i].rx_pkts = 0;
		internal->rx_queue[i].rx_bytes = 0;
	}
	for (i = 0; i < internal->nb_tx_queues; i++) {
		internal->tx_queue[i].tx_pkts = 0;
		internal->tx_queue[i].err_pkts = 0;
		internal->tx_queue[i].tx_bytes = 0;
	}
}
static void
eth_dev_close(struct rte_eth_dev *dev)
{
	unsigned i;
	struct pmd_internals *internals = dev->data->dev_private;

	for (i = 0; i < internals->nb_rx_queues; i++) {
		if (internals->rx_queue[i].sze != NULL) {
			szedata_close(internals->rx_queue[i].sze);
			internals->rx_queue[i].sze = NULL;
		}
	}

	for (i = 0; i < internals->nb_tx_queues; i++) {
		if (internals->tx_queue[i].sze != NULL) {
			szedata_close(internals->tx_queue[i].sze);
			internals->tx_queue[i].sze = NULL;
		}
	}

	internals->nb_rx_queues = 0;
	internals->nb_tx_queues = 0;

	dev->data->nb_rx_queues = (uint16_t)0;
	dev->data->nb_tx_queues = (uint16_t)0;
}
static void
eth_queue_release(void *q __rte_unused)
{
}
static int
eth_link_update(struct rte_eth_dev *dev __rte_unused,
		int wait_to_complete __rte_unused)
{
	return 0;
}
static int
eth_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t rx_queue_id,
		uint16_t nb_rx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_rxconf *rx_conf __rte_unused,
		struct rte_mempool *mb_pool)
{
	struct pmd_internals *internals = dev->data->dev_private;
	struct szedata2_rx_queue *szedata2_q =
		&internals->rx_queue[rx_queue_id];

	szedata2_q->mb_pool = mb_pool;
	dev->data->rx_queues[rx_queue_id] = szedata2_q;
	szedata2_q->in_port = dev->data->port_id;

	return 0;
}
static int
eth_tx_queue_setup(struct rte_eth_dev *dev,
		uint16_t tx_queue_id,
		uint16_t nb_tx_desc __rte_unused,
		unsigned int socket_id __rte_unused,
		const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct pmd_internals *internals = dev->data->dev_private;

	dev->data->tx_queues[tx_queue_id] = &internals->tx_queue[tx_queue_id];

	return 0;
}
static void
eth_mac_addr_set(struct rte_eth_dev *dev __rte_unused,
		struct ether_addr *mac_addr __rte_unused)
{
}
static struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.dev_close = eth_dev_close,
	.dev_configure = eth_dev_configure,
	.dev_infos_get = eth_dev_info,
	.rx_queue_setup = eth_rx_queue_setup,
	.tx_queue_setup = eth_tx_queue_setup,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	.mac_addr_set = eth_mac_addr_set,
};
static int
parse_mask(const char *mask_str, uint32_t *mask_num)
{
	char *endptr;
	long int value;

	value = strtol(mask_str, &endptr, 0);
	if (*endptr != '\0' || value > UINT32_MAX || value < 0)
		return -1;

	*mask_num = (uint32_t)value;
	return 0;
}
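
/*
 * Usage example (added, illustrative): strtol() with base 0 accepts
 * hexadecimal, octal and decimal literals, so "0xF", "017" and "15"
 * all produce the same mask:
 *
 *	uint32_t mask;
 *
 *	if (parse_mask("0xF", &mask) == 0)
 *		assert(mask == 0xF);
 */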
static int
add_rx_mask(const char *key __rte_unused, const char *value, void *extra_args)
{
	struct rxtx_szedata2 *szedata2 = extra_args;
	uint32_t mask;

	if (parse_mask(value, &mask) != 0)
		return -1;

	szedata2->sze_rx_mask_req |= mask;
	return 0;
}
static int
add_tx_mask(const char *key __rte_unused, const char *value, void *extra_args)
{
	struct rxtx_szedata2 *szedata2 = extra_args;
	uint32_t mask;

	if (parse_mask(value, &mask) != 0)
		return -1;

	szedata2->sze_tx_mask_req |= mask;
	return 0;
}
static int
rte_pmd_init_internals(const char *name, const unsigned nb_rx_queues,
		const unsigned nb_tx_queues,
		const unsigned numa_node,
		struct pmd_internals **internals,
		struct rte_eth_dev **eth_dev)
{
	struct rte_eth_dev_data *data = NULL;

	RTE_LOG(INFO, PMD,
			"Creating szedata2-backed ethdev on numa socket %u\n",
			numa_node);

	/*
	 * now do all data allocation - for eth_dev structure
	 * and internal (private) data
	 */
	data = rte_zmalloc_socket(name, sizeof(*data), 0, numa_node);
	if (data == NULL)
		goto error;

	*internals = rte_zmalloc_socket(name, sizeof(**internals), 0,
			numa_node);
	if (*internals == NULL)
		goto error;

	/* reserve an ethdev entry */
	*eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL);
	if (*eth_dev == NULL)
		goto error;

	/*
	 * now put it all together
	 * - store queue data in internals
	 * - point eth_dev_data to internals
	 * - point the eth_dev structure to the new eth_dev_data structure
	 *
	 * NOTE: we'll replace the data element of the originally allocated
	 * eth_dev so the rings are local per-process
	 */

	(*internals)->nb_rx_queues = nb_rx_queues;
	(*internals)->nb_tx_queues = nb_tx_queues;
	(*internals)->if_index = 0;

	data->dev_private = *internals;
	data->port_id = (*eth_dev)->data->port_id;
	snprintf(data->name, sizeof(data->name), "%s", (*eth_dev)->data->name);
	data->nb_rx_queues = (uint16_t)nb_rx_queues;
	data->nb_tx_queues = (uint16_t)nb_tx_queues;
	data->dev_link = pmd_link;
	data->mac_addrs = &eth_addr;

	(*eth_dev)->data = data;
	(*eth_dev)->dev_ops = &ops;
	(*eth_dev)->data->dev_flags = RTE_ETH_DEV_DETACHABLE;
	(*eth_dev)->driver = NULL;
	(*eth_dev)->data->kdrv = RTE_KDRV_NONE;
	(*eth_dev)->data->drv_name = drivername;
	(*eth_dev)->data->numa_node = numa_node;

	return 0;

error:
	rte_free(data);
	rte_free(*internals);
	return -1;
}
static int
rte_eth_from_szedata2(const char *name,
		struct rxtx_szedata2 *szedata2,
		const unsigned numa_node)
{
	struct pmd_internals *internals = NULL;
	struct rte_eth_dev *eth_dev = NULL;
	int ret;

	if (rte_pmd_init_internals(name, 0, 0, numa_node,
			&internals, &eth_dev) < 0)
		return -1;

	internals->sze_dev = szedata2->sze_dev;
	internals->sze_rx_req = szedata2->sze_rx_mask_req;
	internals->sze_tx_req = szedata2->sze_tx_mask_req;
	internals->num_of_rx = szedata2->num_of_rx;
	internals->num_of_tx = szedata2->num_of_tx;

	RTE_LOG(INFO, PMD, "Number of rx channels to open: %u mask: 0x%x\n",
			internals->num_of_rx, internals->sze_rx_req);
	RTE_LOG(INFO, PMD, "Number of tx channels to open: %u mask: 0x%x\n",
			internals->num_of_tx, internals->sze_tx_req);

	ret = init_rx_channels(eth_dev, 1);
	if (ret != 0) {
		close_rx_channels(eth_dev);
		return -1;
	}

	ret = init_tx_channels(eth_dev, 1);
	if (ret != 0) {
		close_tx_channels(eth_dev);
		close_rx_channels(eth_dev);
		return -1;
	}

	eth_dev->rx_pkt_burst = eth_szedata2_rx;
	eth_dev->tx_pkt_burst = NULL; /* tx burst is not implemented here */

	return 0;
}
static int
rte_pmd_szedata2_devinit(const char *name, const char *params)
{
	int ret;
	unsigned numa_node;
	unsigned k_idx;
	struct rte_kvargs *kvlist;
	struct rte_kvargs_pair *pair = NULL;
	struct rxtx_szedata2 szedata2 = { 0, 0, 0, 0, NULL };
	bool dev_path_missing = true;

	RTE_LOG(INFO, PMD, "Initializing pmd_szedata2 for %s\n", name);

	numa_node = rte_socket_id();

	kvlist = rte_kvargs_parse(params, valid_arguments);
	if (kvlist == NULL)
		return -1;

	/*
	 * Get szedata2 device path and rx,tx channels from passed arguments.
	 */

	if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_DEV_PATH_ARG) != 1)
		goto err;

	if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_RX_IFACES_ARG) < 1)
		goto err;

	if (rte_kvargs_count(kvlist, RTE_ETH_SZEDATA2_TX_IFACES_ARG) < 1)
		goto err;

	for (k_idx = 0; k_idx < kvlist->count; k_idx++) {
		pair = &kvlist->pairs[k_idx];
		if (strstr(pair->key, RTE_ETH_SZEDATA2_DEV_PATH_ARG) != NULL) {
			szedata2.sze_dev = pair->value;
			dev_path_missing = false;
			break;
		}
	}

	if (dev_path_missing)
		goto err;

	ret = rte_kvargs_process(kvlist, RTE_ETH_SZEDATA2_RX_IFACES_ARG,
			&add_rx_mask, &szedata2);
	if (ret)
		goto err;

	ret = rte_kvargs_process(kvlist, RTE_ETH_SZEDATA2_TX_IFACES_ARG,
			&add_tx_mask, &szedata2);
	if (ret)
		goto err;

	szedata2.num_of_rx = count_ones(szedata2.sze_rx_mask_req);
	szedata2.num_of_tx = count_ones(szedata2.sze_tx_mask_req);

	RTE_LOG(INFO, PMD, "SZE device found at path %s\n", szedata2.sze_dev);

	/* kvlist is not freed on success: szedata2.sze_dev points into it */
	return rte_eth_from_szedata2(name, &szedata2, numa_node);

err:
	rte_kvargs_free(kvlist);
	return -1;
}
static int
rte_pmd_szedata2_devuninit(const char *name)
{
	struct rte_eth_dev *dev = NULL;

	RTE_LOG(INFO, PMD, "Uninitializing pmd_szedata2 for %s "
			"on numa socket %u\n", name, rte_socket_id());

	if (name == NULL)
		return -1;

	dev = rte_eth_dev_allocated(name);
	if (dev == NULL)
		return -1;

	rte_free(dev->data->dev_private);
	rte_free(dev->data);
	rte_eth_dev_release_port(dev);

	return 0;
}
static struct rte_driver pmd_szedata2_drv = {
	.name = "eth_szedata2",
	.type = PMD_VDEV,
	.init = rte_pmd_szedata2_devinit,
	.uninit = rte_pmd_szedata2_devuninit,
};

PMD_REGISTER_DRIVER(pmd_szedata2_drv);
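
/*
 * Usage example (added, illustrative): the PMD is instantiated from the
 * EAL command line as a virtual device, using the kvargs defined at the
 * top of this file, e.g.
 *
 *   testpmd --vdev \
 *       'eth_szedata2,dev_path=/dev/szedataII0,rx_ifaces=0x3,tx_ifaces=0x3'
 *
 * which opens the given device node and subscribes rx and tx channels
 * 0 and 1. The device path shown is an assumption; the actual node name
 * depends on the szedata2 kernel driver configuration.
 */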