/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>
#include <rte_tm_driver.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

#define DEV_HARD(p)					\
	(&rte_eth_devices[p->hard.port_id])
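/*
 * This PMD creates a "soft" vdev port that is paired with an existing "hard"
 * (underlying) ethdev port, identified by the "hard_name" devarg. The
 * DEV_HARD() macro above resolves the driver private data back to that hard
 * ethdev entry.
 */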
#define PMD_PARAM_SOFT_TM		"soft_tm"
#define PMD_PARAM_SOFT_TM_RATE		"soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES	"soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0	"soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1	"soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2	"soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3	"soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ	"soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ	"soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME		"hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID	"hard_tx_queue_id"
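/*
 * Devargs keys accepted by this PMD. "hard_name" is mandatory (see
 * pmd_parse_args() below); all "soft_tm*" keys are optional and, when
 * present, enable the soft traffic management feature. The default values
 * for the soft TM parameters (SOFTNIC_SOFT_TM_* macros) are expected to be
 * defined in rte_eth_softnic_internals.h.
 */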
static const char *pmd_valid_args[] = {
	PMD_PARAM_SOFT_TM,
	PMD_PARAM_SOFT_TM_RATE,
	PMD_PARAM_SOFT_TM_NB_QUEUES,
	PMD_PARAM_SOFT_TM_QSIZE0,
	PMD_PARAM_SOFT_TM_QSIZE1,
	PMD_PARAM_SOFT_TM_QSIZE2,
	PMD_PARAM_SOFT_TM_QSIZE3,
	PMD_PARAM_SOFT_TM_ENQ_BSZ,
	PMD_PARAM_SOFT_TM_DEQ_BSZ,
	PMD_PARAM_HARD_NAME,
	PMD_PARAM_HARD_TX_QUEUE_ID,
	NULL
};
static const struct rte_eth_dev_info pmd_dev_info = {
	.min_rx_bufsize = 0,
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = UINT16_MAX,
	.max_tx_queues = UINT16_MAX,
};
static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_dev_info *dev_info)
{
	memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
}
static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_eth_dev *hard_dev = DEV_HARD(p);

	if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
		return -1;

	if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
		return -1;

	return 0;
}
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	uint16_t nb_rx_desc __rte_unused,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf __rte_unused,
	struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (p->params.soft.intrusive == 0) {
		struct pmd_rx_queue *rxq;

		rxq = rte_zmalloc_socket(p->params.soft.name,
			sizeof(struct pmd_rx_queue), 0, socket_id);
		if (rxq == NULL)
			return -ENOMEM;

		rxq->hard.port_id = p->hard.port_id;
		rxq->hard.rx_queue_id = rx_queue_id;
		dev->data->rx_queues[rx_queue_id] = rxq;
	} else {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);
		void *rxq = hard_dev->data->rx_queues[rx_queue_id];

		if (rxq == NULL)
			return -1;

		dev->data->rx_queues[rx_queue_id] = rxq;
	}

	return 0;
}
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
	uint16_t tx_queue_id,
	uint16_t nb_tx_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf __rte_unused)
{
	uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
	char name[size];
	struct rte_ring *r;

	snprintf(name, sizeof(name), "%s_txq%04x",
		dev->data->name, tx_queue_id);
	r = rte_ring_create(name, nb_tx_desc, socket_id,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	dev->data->tx_queues[tx_queue_id] = r;
	return 0;
}
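/*
 * Design note: each soft TX queue is a single-producer/single-consumer
 * rte_ring rather than a hardware queue. The TX burst callback
 * (pmd_tx_pkt_burst() below) only enqueues mbufs into this ring; packets are
 * actually drained and written to the hard device TX queue by
 * rte_pmd_softnic_run().
 */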
static int
pmd_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (tm_used(dev)) {
		int status = tm_start(p);

		if (status)
			return status;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	if (p->params.soft.intrusive) {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);

		/* The hard_dev->rx_pkt_burst should be stable by now */
		dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
	}

	return 0;
}
static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (tm_used(dev))
		tm_stop(p);
}
static void
pmd_dev_close(struct rte_eth_dev *dev)
{
	uint32_t i;

	/* TX queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}
static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
	int wait_to_complete __rte_unused)
{
	return 0;
}
static int
pmd_tm_ops_get(struct rte_eth_dev *dev, void *arg)
{
	*(const struct rte_tm_ops **)arg =
		(tm_enabled(dev)) ? &pmd_tm_ops : NULL;

	return 0;
}
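/*
 * When the TM feature is enabled on the soft device, tm_ops_get exposes the
 * generic ethdev traffic management API (rte_tm) through pmd_tm_ops,
 * implemented elsewhere in this driver on top of librte_sched; otherwise
 * NULL is returned and the rte_tm API is unavailable on this port.
 */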
static const struct eth_dev_ops pmd_ops = {
	.dev_configure = pmd_dev_configure,
	.dev_start = pmd_dev_start,
	.dev_stop = pmd_dev_stop,
	.dev_close = pmd_dev_close,
	.link_update = pmd_link_update,
	.dev_infos_get = pmd_dev_infos_get,
	.rx_queue_setup = pmd_rx_queue_setup,
	.tx_queue_setup = pmd_tx_queue_setup,
	.tm_ops_get = pmd_tm_ops_get,
};
static uint16_t
pmd_rx_pkt_burst(void *rxq,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct pmd_rx_queue *rx_queue = rxq;

	return rte_eth_rx_burst(rx_queue->hard.port_id,
		rx_queue->hard.rx_queue_id,
		rx_pkts,
		nb_pkts);
}
static uint16_t
pmd_tx_pkt_burst(void *txq,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	return (uint16_t)rte_ring_enqueue_burst(txq,
		(void **)tx_pkts,
		nb_pkts,
		NULL);
}
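/*
 * Default (non-TM) run step: dequeue packets from the soft device TX rings
 * round-robin, accumulate them in the persistent pkts[] buffer, and write
 * them to the hard device TX queue either when a full DEFAULT_BURST_SIZE
 * burst is available or when the flush counter crosses
 * FLUSH_COUNT_THRESHOLD, so partially filled bursts are not held forever.
 */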
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_mbuf **pkts = p->soft.def.pkts;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.def.txq_pos;
	uint32_t pkts_len = p->soft.def.pkts_len;
	uint32_t flush_count = p->soft.def.flush_count;

	/* Not part of the persistent context */
	uint32_t pos;
	uint16_t i;

	/* Soft device TXQ read, Hard device TXQ write */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read soft device TXQ burst to packet enqueue buffer */
		pkts_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts[pkts_len],
			DEFAULT_BURST_SIZE,
			NULL);

		/* Increment soft device TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* Hard device TXQ write when complete burst is available */
		if (pkts_len >= DEFAULT_BURST_SIZE) {
			for (pos = 0; pos < pkts_len; )
				pos += rte_eth_tx_burst(p->hard.port_id,
					p->params.hard.tx_queue_id,
					&pkts[pos],
					(uint16_t)(pkts_len - pos));

			pkts_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		for (pos = 0; pos < pkts_len; )
			pos += rte_eth_tx_burst(p->hard.port_id,
				p->params.hard.tx_queue_id,
				&pkts[pos],
				(uint16_t)(pkts_len - pos));

		pkts_len = 0;
		flush_count = 0;
	}

	p->soft.def.txq_pos = txq_pos;
	p->soft.def.pkts_len = pkts_len;
	p->soft.def.flush_count = flush_count + 1;

	return 0;
}
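/*
 * TM run step: same round-robin drain of the soft TX rings, but packets are
 * first enqueued into the librte_sched port (rte_sched_port_enqueue()) in
 * bursts of enq_bsz; the scheduler then dequeues up to deq_bsz packets
 * (rte_sched_port_dequeue()), which are written to the hard device TX queue.
 */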
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_sched_port *sched = p->soft.tm.sched;
	struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
	struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
	uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
	uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.tm.txq_pos;
	uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
	uint32_t flush_count = p->soft.tm.flush_count;

	/* Not part of the persistent context */
	uint32_t pkts_deq_len, pos;
	uint16_t i;

	/* Soft device TXQ read, TM enqueue */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read TXQ burst to packet enqueue buffer */
		pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts_enq[pkts_enq_len],
			enq_bsz,
			NULL);

		/* Increment TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* TM enqueue when complete burst is available */
		if (pkts_enq_len >= enq_bsz) {
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

			pkts_enq_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		if (pkts_enq_len)
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

		pkts_enq_len = 0;
		flush_count = 0;
	}

	p->soft.tm.txq_pos = txq_pos;
	p->soft.tm.pkts_enq_len = pkts_enq_len;
	p->soft.tm.flush_count = flush_count + 1;

	/* TM dequeue, Hard device TXQ write */
	pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

	for (pos = 0; pos < pkts_deq_len; )
		pos += rte_eth_tx_burst(p->hard.port_id,
			p->params.hard.tx_queue_id,
			&pkts_deq[pos],
			(uint16_t)(pkts_deq_len - pos));

	return 0;
}
int
rte_pmd_softnic_run(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

	return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}
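/*
 * rte_pmd_softnic_run() is the public entry point (declared in
 * rte_eth_softnic.h) that an application thread is expected to invoke
 * periodically so the soft device can move packets. A minimal sketch of a
 * hypothetical polling loop (the quit flag and the vdev name are
 * assumptions, not part of this driver):
 *
 *	uint16_t soft_port_id;
 *
 *	rte_eth_dev_get_port_by_name("net_softnic0", &soft_port_id);
 *	while (!quit)
 *		rte_pmd_softnic_run(soft_port_id);
 */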
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
	/* Indexed by the bit positions of the ETH_LINK_SPEED_* capability flags */
	uint32_t rate_mbps[32] = {
		ETH_SPEED_NUM_NONE,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_1G,
		ETH_SPEED_NUM_2_5G,
		ETH_SPEED_NUM_5G,
		ETH_SPEED_NUM_10G,
		ETH_SPEED_NUM_20G,
		ETH_SPEED_NUM_25G,
		ETH_SPEED_NUM_40G,
		ETH_SPEED_NUM_50G,
		ETH_SPEED_NUM_56G,
		ETH_SPEED_NUM_100G,
	};

	uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;

	return rate_mbps[pos];
}
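/*
 * Worked example: speed_capa is a bitmask of ETH_LINK_SPEED_* flags, and
 * (31 - __builtin_clz(speed_capa)) is the index of its highest set bit.
 * If the highest capability bit is ETH_LINK_SPEED_10G (bit 8), the function
 * returns rate_mbps[8] = ETH_SPEED_NUM_10G, i.e. 10000 Mbps.
 */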
static int
default_init(struct pmd_internals *p,
	struct pmd_params *params,
	int numa_node)
{
	p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
		2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
		0,
		numa_node);

	if (p->soft.def.pkts == NULL)
		return -ENOMEM;

	return 0;
}
static void
default_free(struct pmd_internals *p)
{
	rte_free(p->soft.def.pkts);
}
static void *
pmd_init(struct pmd_params *params, int numa_node)
{
	struct pmd_internals *p;
	int status;

	p = rte_zmalloc_socket(params->soft.name,
		sizeof(struct pmd_internals),
		0,
		numa_node);
	if (p == NULL)
		return NULL;

	memcpy(&p->params, params, sizeof(p->params));
	rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

	/* Default */
	status = default_init(p, params, numa_node);
	if (status) {
		free(p->params.hard.name);
		rte_free(p);
		return NULL;
	}

	/* Traffic Management (TM)*/
	if (params->soft.flags & PMD_FEATURE_TM) {
		status = tm_init(p, params, numa_node);
		if (status) {
			default_free(p);
			free(p->params.hard.name);
			rte_free(p);
			return NULL;
		}
	}

	return p;
}
static void
pmd_free(struct pmd_internals *p)
{
	if (p->params.soft.flags & PMD_FEATURE_TM)
		tm_free(p);

	default_free(p);

	free(p->params.hard.name);
	rte_free(p);
}
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
	struct pmd_params *params,
	void *dev_private)
{
	struct rte_eth_dev_info hard_info;
	struct rte_eth_dev *soft_dev;
	uint32_t hard_speed;
	int numa_node;
	uint16_t hard_port_id;

	rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	/* Ethdev entry allocation */
	soft_dev = rte_eth_dev_allocate(params->soft.name);
	if (!soft_dev)
		return -ENOMEM;

	/* dev */
	soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
		NULL : /* set up later */
		pmd_rx_pkt_burst;
	soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
	soft_dev->tx_pkt_prepare = NULL;
	soft_dev->dev_ops = &pmd_ops;
	soft_dev->device = &vdev->device;

	/* dev->data */
	soft_dev->data->dev_private = dev_private;
	soft_dev->data->dev_link.link_speed = hard_speed;
	soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	soft_dev->data->dev_link.link_autoneg = ETH_LINK_SPEED_FIXED;
	soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	soft_dev->data->mac_addrs = &eth_addr;
	soft_dev->data->promiscuous = 1;
	soft_dev->data->kdrv = RTE_KDRV_NONE;
	soft_dev->data->numa_node = numa_node;

	return 0;
}
static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}
static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint32_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}
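/*
 * get_string() and get_uint32() are rte_kvargs_process() callbacks: for each
 * matching "key=value" pair, rte_kvargs_process() invokes the callback with
 * the value string and an opaque extra_args pointer, which pmd_parse_args()
 * below points at the pmd_params field to be filled in.
 */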
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
	struct rte_kvargs *kvlist;
	int i, ret;

	kvlist = rte_kvargs_parse(params, pmd_valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Set default values */
	memset(p, 0, sizeof(*p));
	p->soft.name = name;
	p->soft.intrusive = INTRUSIVE;
	p->soft.tm.rate = 0;
	p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
	p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
	p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
	p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;
	/* SOFT: TM (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
		char *s = NULL;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
			&get_string, &s);
		if (ret < 0)
			goto out_free;

		if (strcmp(s, "on") == 0)
			p->soft.flags |= PMD_FEATURE_TM;
		else if (strcmp(s, "off") == 0)
			p->soft.flags &= ~PMD_FEATURE_TM;
		else
			ret = -EINVAL;

		free(s);
		if (ret)
			goto out_free;
	}
	/* SOFT: TM rate (measured in bytes/second) (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
			&get_uint32, &p->soft.tm.rate);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM number of queues (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
			&get_uint32, &p->soft.tm.nb_queues);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* SOFT: TM queue size 0 .. 3 (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[0] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[1] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[2] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[3] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* SOFT: TM enqueue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
			&get_uint32, &p->soft.tm.enq_bsz);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM dequeue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
			&get_uint32, &p->soft.tm.deq_bsz);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}
	/* HARD: name (mandatory) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
			&get_string, &p->hard.name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -EINVAL;
		goto out_free;
	}

	/* HARD: tx_queue_id (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
			&get_uint32, &p->hard.tx_queue_id);
		if (ret < 0)
			goto out_free;
	}

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}
static int
pmd_probe(struct rte_vdev_device *vdev)
{
	struct pmd_params p;
	const char *params;
	int status;

	struct rte_eth_dev_info hard_info;
	uint32_t hard_speed;
	uint16_t hard_port_id;
	int numa_node;
	void *dev_private;

	RTE_LOG(INFO, PMD,
		"Probing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Parse input arguments */
	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
	if (status)
		return status;

	/* Check input arguments */
	if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
		return -EINVAL;

	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
		return -EINVAL;

	if (p.soft.flags & PMD_FEATURE_TM) {
		status = tm_params_check(&p, hard_speed);

		if (status)
			return status;
	}

	/* Allocate and initialize soft ethdev private data */
	dev_private = pmd_init(&p, numa_node);
	if (dev_private == NULL)
		return -ENOMEM;

	/* Register soft ethdev */
	RTE_LOG(INFO, PMD,
		"Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
		p.soft.name, p.hard.name);

	status = pmd_ethdev_register(vdev, &p, dev_private);
	if (status) {
		pmd_free(dev_private);
		return status;
	}

	return 0;
}
static int
pmd_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct pmd_internals *p;

	if (!vdev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Find the ethdev entry */
	dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (dev == NULL)
		return -ENODEV;
	p = dev->data->dev_private;

	/* Free device data structures*/
	pmd_free(p);
	rte_free(dev->data);
	rte_eth_dev_release_port(dev);

	return 0;
}
static struct rte_vdev_driver pmd_softnic_drv = {
	.probe = pmd_probe,
	.remove = pmd_remove,
};

RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);
RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
	PMD_PARAM_SOFT_TM "=on|off "
	PMD_PARAM_SOFT_TM_RATE "=<int> "
	PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
	PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
	PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
	PMD_PARAM_HARD_NAME "=<string> "
	PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
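/*
 * Example devargs (a sketch; the hard device name "0000:02:00.0" is
 * hypothetical):
 *
 *	--vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on'
 *
 * This creates soft port "net_softnic0" on top of the given hard port with
 * the soft traffic manager enabled; the application must then call
 * rte_pmd_softnic_run() in a loop for the soft port to forward packets.
 */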