/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_ethdev.h>
#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_vdev.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_sched.h>

#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"

#define DEV_HARD(p)					\
	(&rte_eth_devices[p->hard.port_id])
#define PMD_PARAM_SOFT_TM "soft_tm"
#define PMD_PARAM_SOFT_TM_RATE "soft_tm_rate"
#define PMD_PARAM_SOFT_TM_NB_QUEUES "soft_tm_nb_queues"
#define PMD_PARAM_SOFT_TM_QSIZE0 "soft_tm_qsize0"
#define PMD_PARAM_SOFT_TM_QSIZE1 "soft_tm_qsize1"
#define PMD_PARAM_SOFT_TM_QSIZE2 "soft_tm_qsize2"
#define PMD_PARAM_SOFT_TM_QSIZE3 "soft_tm_qsize3"
#define PMD_PARAM_SOFT_TM_ENQ_BSZ "soft_tm_enq_bsz"
#define PMD_PARAM_SOFT_TM_DEQ_BSZ "soft_tm_deq_bsz"

#define PMD_PARAM_HARD_NAME "hard_name"
#define PMD_PARAM_HARD_TX_QUEUE_ID "hard_tx_queue_id"
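
/*
 * Illustrative usage (not part of the driver): the parameters above are
 * passed as vdev device arguments. A hypothetical EAL command line that
 * attaches a soft NIC on top of an existing "hard" port could look like:
 *
 *   --vdev 'net_softnic0,hard_name=0000:02:00.0,soft_tm=on,hard_tx_queue_id=0'
 *
 * The PCI address and queue id here are placeholders; hard_name must match
 * the name of an ethdev that has already been probed.
 */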
static const char *pmd_valid_args[] = {
	PMD_PARAM_SOFT_TM,
	PMD_PARAM_SOFT_TM_RATE,
	PMD_PARAM_SOFT_TM_NB_QUEUES,
	PMD_PARAM_SOFT_TM_QSIZE0,
	PMD_PARAM_SOFT_TM_QSIZE1,
	PMD_PARAM_SOFT_TM_QSIZE2,
	PMD_PARAM_SOFT_TM_QSIZE3,
	PMD_PARAM_SOFT_TM_ENQ_BSZ,
	PMD_PARAM_SOFT_TM_DEQ_BSZ,
	PMD_PARAM_HARD_NAME,
	PMD_PARAM_HARD_TX_QUEUE_ID,
	NULL
};
static const struct rte_eth_dev_info pmd_dev_info = {
	.max_rx_pktlen = UINT32_MAX,
	.max_rx_queues = UINT16_MAX,
	.max_tx_queues = UINT16_MAX,
};

static void
pmd_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
	struct rte_eth_dev_info *dev_info)
{
	memcpy(dev_info, &pmd_dev_info, sizeof(*dev_info));
}
static int
pmd_dev_configure(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;
	struct rte_eth_dev *hard_dev = DEV_HARD(p);

	if (dev->data->nb_rx_queues > hard_dev->data->nb_rx_queues)
		return -1;

	if (p->params.hard.tx_queue_id >= hard_dev->data->nb_tx_queues)
		return -1;

	return 0;
}
static int
pmd_rx_queue_setup(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	uint16_t nb_rx_desc __rte_unused,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf __rte_unused,
	struct rte_mempool *mb_pool __rte_unused)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (p->params.soft.intrusive == 0) {
		struct pmd_rx_queue *rxq;

		rxq = rte_zmalloc_socket(p->params.soft.name,
			sizeof(struct pmd_rx_queue), 0, socket_id);
		if (rxq == NULL)
			return -ENOMEM;

		rxq->hard.port_id = p->hard.port_id;
		rxq->hard.rx_queue_id = rx_queue_id;
		dev->data->rx_queues[rx_queue_id] = rxq;
	} else {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);
		void *rxq = hard_dev->data->rx_queues[rx_queue_id];

		if (rxq == NULL)
			return -1;

		dev->data->rx_queues[rx_queue_id] = rxq;
	}

	return 0;
}
static int
pmd_tx_queue_setup(struct rte_eth_dev *dev,
	uint16_t tx_queue_id,
	uint16_t nb_tx_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf __rte_unused)
{
	uint32_t size = RTE_ETH_NAME_MAX_LEN + strlen("_txq") + 4;
	char name[size];
	struct rte_ring *r;

	snprintf(name, sizeof(name), "%s_txq%04x",
		dev->data->name, tx_queue_id);
	r = rte_ring_create(name, nb_tx_desc, socket_id,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	dev->data->tx_queues[tx_queue_id] = r;
	return 0;
}
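
/*
 * Note (added for clarity): each soft TX queue is a single-producer /
 * single-consumer rte_ring sized by nb_tx_desc. The soft device TX burst
 * only enqueues mbuf pointers into this ring; the ring is drained towards
 * the hard device (or the traffic manager) by rte_pmd_softnic_run() below.
 */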
static int
pmd_dev_start(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	if (tm_used(dev)) {
		int status = tm_start(p);

		if (status)
			return status;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	if (p->params.soft.intrusive) {
		struct rte_eth_dev *hard_dev = DEV_HARD(p);

		/* The hard_dev->rx_pkt_burst should be stable by now */
		dev->rx_pkt_burst = hard_dev->rx_pkt_burst;
	}

	return 0;
}
static void
pmd_dev_stop(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (tm_used(dev))
		tm_stop(p);
}
static void
pmd_dev_close(struct rte_eth_dev *dev)
{
	uint32_t i;

	/* TX queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		rte_ring_free((struct rte_ring *)dev->data->tx_queues[i]);
}
static int
pmd_link_update(struct rte_eth_dev *dev __rte_unused,
	int wait_to_complete __rte_unused)
{
	return 0;
}
static const struct eth_dev_ops pmd_ops = {
	.dev_configure = pmd_dev_configure,
	.dev_start = pmd_dev_start,
	.dev_stop = pmd_dev_stop,
	.dev_close = pmd_dev_close,
	.link_update = pmd_link_update,
	.dev_infos_get = pmd_dev_infos_get,
	.rx_queue_setup = pmd_rx_queue_setup,
	.tx_queue_setup = pmd_tx_queue_setup,
};
static uint16_t
pmd_rx_pkt_burst(void *rxq,
	struct rte_mbuf **rx_pkts,
	uint16_t nb_pkts)
{
	struct pmd_rx_queue *rx_queue = rxq;

	return rte_eth_rx_burst(rx_queue->hard.port_id,
		rx_queue->hard.rx_queue_id,
		rx_pkts,
		nb_pkts);
}
static uint16_t
pmd_tx_pkt_burst(void *txq,
	struct rte_mbuf **tx_pkts,
	uint16_t nb_pkts)
{
	return (uint16_t)rte_ring_enqueue_burst(txq,
		(void **)tx_pkts,
		nb_pkts,
		NULL);
}
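
/*
 * Note (added for clarity): the soft device TX burst does not touch the hard
 * device at all; it only buffers the mbuf pointers in the soft TX queue ring.
 * The actual transmission happens later, from rte_pmd_softnic_run(), either
 * directly (run_default) or through the rte_sched traffic manager (run_tm).
 */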
static __rte_always_inline int
run_default(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_mbuf **pkts = p->soft.def.pkts;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.def.txq_pos;
	uint32_t pkts_len = p->soft.def.pkts_len;
	uint32_t flush_count = p->soft.def.flush_count;

	/* Not part of the persistent context */
	uint32_t pos;
	uint16_t i;

	/* Soft device TXQ read, Hard device TXQ write */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read soft device TXQ burst to packet enqueue buffer */
		pkts_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts[pkts_len],
			DEFAULT_BURST_SIZE,
			NULL);

		/* Increment soft device TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* Hard device TXQ write when complete burst is available */
		if (pkts_len >= DEFAULT_BURST_SIZE) {
			for (pos = 0; pos < pkts_len; )
				pos += rte_eth_tx_burst(p->hard.port_id,
					p->params.hard.tx_queue_id,
					&pkts[pos],
					(uint16_t)(pkts_len - pos));

			pkts_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		for (pos = 0; pos < pkts_len; )
			pos += rte_eth_tx_burst(p->hard.port_id,
				p->params.hard.tx_queue_id,
				&pkts[pos],
				(uint16_t)(pkts_len - pos));

		pkts_len = 0;
		flush_count = 0;
	}

	p->soft.def.txq_pos = txq_pos;
	p->soft.def.pkts_len = pkts_len;
	p->soft.def.flush_count = flush_count + 1;

	return 0;
}
static __rte_always_inline int
run_tm(struct rte_eth_dev *dev)
{
	struct pmd_internals *p = dev->data->dev_private;

	/* Persistent context: Read Only (update not required) */
	struct rte_sched_port *sched = p->soft.tm.sched;
	struct rte_mbuf **pkts_enq = p->soft.tm.pkts_enq;
	struct rte_mbuf **pkts_deq = p->soft.tm.pkts_deq;
	uint32_t enq_bsz = p->params.soft.tm.enq_bsz;
	uint32_t deq_bsz = p->params.soft.tm.deq_bsz;
	uint16_t nb_tx_queues = dev->data->nb_tx_queues;

	/* Persistent context: Read - Write (update required) */
	uint32_t txq_pos = p->soft.tm.txq_pos;
	uint32_t pkts_enq_len = p->soft.tm.pkts_enq_len;
	uint32_t flush_count = p->soft.tm.flush_count;

	/* Not part of the persistent context */
	uint32_t pkts_deq_len, pos;
	uint16_t i;

	/* Soft device TXQ read, TM enqueue */
	for (i = 0; i < nb_tx_queues; i++) {
		struct rte_ring *txq = dev->data->tx_queues[txq_pos];

		/* Read TXQ burst to packet enqueue buffer */
		pkts_enq_len += rte_ring_sc_dequeue_burst(txq,
			(void **)&pkts_enq[pkts_enq_len],
			enq_bsz,
			NULL);

		/* Increment TXQ */
		txq_pos++;
		if (txq_pos >= nb_tx_queues)
			txq_pos = 0;

		/* TM enqueue when complete burst is available */
		if (pkts_enq_len >= enq_bsz) {
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

			pkts_enq_len = 0;
			flush_count = 0;
			break;
		}
	}

	if (flush_count >= FLUSH_COUNT_THRESHOLD) {
		if (pkts_enq_len)
			rte_sched_port_enqueue(sched, pkts_enq, pkts_enq_len);

		pkts_enq_len = 0;
		flush_count = 0;
	}

	p->soft.tm.txq_pos = txq_pos;
	p->soft.tm.pkts_enq_len = pkts_enq_len;
	p->soft.tm.flush_count = flush_count + 1;

	/* TM dequeue, Hard device TXQ write */
	pkts_deq_len = rte_sched_port_dequeue(sched, pkts_deq, deq_bsz);

	for (pos = 0; pos < pkts_deq_len; )
		pos += rte_eth_tx_burst(p->hard.port_id,
			p->params.hard.tx_queue_id,
			&pkts_deq[pos],
			(uint16_t)(pkts_deq_len - pos));

	return 0;
}
int
rte_pmd_softnic_run(uint16_t port_id)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, 0);
#endif

	return (tm_used(dev)) ? run_tm(dev) : run_default(dev);
}
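
/*
 * Illustrative usage sketch (not part of the driver): rte_pmd_softnic_run()
 * is meant to be called repeatedly on the soft port from an lcore owned by
 * the application, e.g.:
 *
 *	while (!force_quit)
 *		rte_pmd_softnic_run(soft_port_id);
 *
 * force_quit and soft_port_id are placeholders for application state.
 */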
static struct ether_addr eth_addr = { .addr_bytes = {0} };
static uint32_t
eth_dev_speed_max_mbps(uint32_t speed_capa)
{
	/* Rate indexed by ETH_LINK_SPEED_* capability bit position */
	uint32_t rate_mbps[32] = {
		ETH_SPEED_NUM_NONE,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_10M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_100M,
		ETH_SPEED_NUM_1G,
		ETH_SPEED_NUM_2_5G,
		ETH_SPEED_NUM_5G,
		ETH_SPEED_NUM_10G,
		ETH_SPEED_NUM_20G,
		ETH_SPEED_NUM_25G,
		ETH_SPEED_NUM_40G,
		ETH_SPEED_NUM_50G,
		ETH_SPEED_NUM_56G,
		ETH_SPEED_NUM_100G,
	};

	uint32_t pos = (speed_capa) ? (31 - __builtin_clz(speed_capa)) : 0;
	return rate_mbps[pos];
}
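
/*
 * Worked example (added for clarity, using the bit-indexed rate table above):
 * if the hard port reports speed_capa = ETH_LINK_SPEED_10G (bit 8 set), then
 * 31 - __builtin_clz(speed_capa) = 8 and the function returns rate_mbps[8],
 * i.e. ETH_SPEED_NUM_10G = 10000 Mbps. Only the highest advertised speed bit
 * matters.
 */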
static int
default_init(struct pmd_internals *p,
	struct pmd_params *params,
	int numa_node)
{
	p->soft.def.pkts = rte_zmalloc_socket(params->soft.name,
		2 * DEFAULT_BURST_SIZE * sizeof(struct rte_mbuf *),
		0,
		numa_node);
	if (p->soft.def.pkts == NULL)
		return -ENOMEM;

	return 0;
}

static void
default_free(struct pmd_internals *p)
{
	rte_free(p->soft.def.pkts);
}
static void *
pmd_init(struct pmd_params *params, int numa_node)
{
	struct pmd_internals *p;
	int status;

	p = rte_zmalloc_socket(params->soft.name,
		sizeof(struct pmd_internals),
		0,
		numa_node);
	if (p == NULL)
		return NULL;

	/* p->params */
	memcpy(&p->params, params, sizeof(p->params));
	rte_eth_dev_get_port_by_name(params->hard.name, &p->hard.port_id);

	/* Default */
	status = default_init(p, params, numa_node);
	if (status) {
		free(p->params.hard.name);
		rte_free(p);
		return NULL;
	}

	/* Traffic Management (TM)*/
	if (params->soft.flags & PMD_FEATURE_TM) {
		status = tm_init(p, params, numa_node);
		if (status) {
			default_free(p);
			free(p->params.hard.name);
			rte_free(p);
			return NULL;
		}
	}

	return p;
}
static void
pmd_free(struct pmd_internals *p)
{
	if (p->params.soft.flags & PMD_FEATURE_TM)
		tm_free(p);

	default_free(p);

	free(p->params.hard.name);
	rte_free(p);
}
static int
pmd_ethdev_register(struct rte_vdev_device *vdev,
	struct pmd_params *params,
	void *dev_private)
{
	struct rte_eth_dev_info hard_info;
	struct rte_eth_dev *soft_dev;
	uint32_t hard_speed;
	int numa_node;
	uint16_t hard_port_id;

	rte_eth_dev_get_port_by_name(params->hard.name, &hard_port_id);
	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	/* Ethdev entry allocation */
	soft_dev = rte_eth_dev_allocate(params->soft.name);
	if (!soft_dev)
		return -ENOMEM;

	/* dev */
	soft_dev->rx_pkt_burst = (params->soft.intrusive) ?
		NULL : /* set up later */
		pmd_rx_pkt_burst;
	soft_dev->tx_pkt_burst = pmd_tx_pkt_burst;
	soft_dev->tx_pkt_prepare = NULL;
	soft_dev->dev_ops = &pmd_ops;
	soft_dev->device = &vdev->device;

	/* dev->data */
	soft_dev->data->dev_private = dev_private;
	soft_dev->data->dev_link.link_speed = hard_speed;
	soft_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	soft_dev->data->dev_link.link_autoneg = ETH_LINK_SPEED_FIXED;
	soft_dev->data->dev_link.link_status = ETH_LINK_DOWN;
	soft_dev->data->mac_addrs = &eth_addr;
	soft_dev->data->promiscuous = 1;
	soft_dev->data->kdrv = RTE_KDRV_NONE;
	soft_dev->data->numa_node = numa_node;
	soft_dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE;

	return 0;
}
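
/*
 * Note (added for clarity): in intrusive mode the soft device RX handler is
 * left NULL here and is copied from the hard device in pmd_dev_start(), so
 * RX on the soft port goes straight to the hard device RX burst function.
 * In non-intrusive mode pmd_rx_pkt_burst() is used instead, which forwards
 * each rte_eth_rx_burst() call to the mapped hard RX queue.
 */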
static int
get_string(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(char **)extra_args = strdup(value);

	if (!*(char **)extra_args)
		return -ENOMEM;

	return 0;
}
static int
get_uint32(const char *key __rte_unused, const char *value, void *extra_args)
{
	if (!value || !extra_args)
		return -EINVAL;

	*(uint32_t *)extra_args = strtoull(value, NULL, 0);

	return 0;
}
static int
pmd_parse_args(struct pmd_params *p, const char *name, const char *params)
{
	struct rte_kvargs *kvlist;
	int i, ret;

	kvlist = rte_kvargs_parse(params, pmd_valid_args);
	if (kvlist == NULL)
		return -EINVAL;

	/* Set default values */
	memset(p, 0, sizeof(*p));
	p->soft.name = name;
	p->soft.intrusive = INTRUSIVE;
	p->soft.tm.nb_queues = SOFTNIC_SOFT_TM_NB_QUEUES;
	for (i = 0; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
		p->soft.tm.qsize[i] = SOFTNIC_SOFT_TM_QUEUE_SIZE;
	p->soft.tm.enq_bsz = SOFTNIC_SOFT_TM_ENQ_BSZ;
	p->soft.tm.deq_bsz = SOFTNIC_SOFT_TM_DEQ_BSZ;
	p->hard.tx_queue_id = SOFTNIC_HARD_TX_QUEUE_ID;

	/* SOFT: TM (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM) == 1) {
		char *s = NULL;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM,
			&get_string, &s);
		if (ret < 0)
			goto out_free;

		if (strcmp(s, "on") == 0)
			p->soft.flags |= PMD_FEATURE_TM;
		else if (strcmp(s, "off") == 0)
			p->soft.flags &= ~PMD_FEATURE_TM;
		else
			ret = -EINVAL;

		free(s);
		if (ret)
			goto out_free;
	}

	/* SOFT: TM rate (measured in bytes/second) (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_RATE) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_RATE,
			&get_uint32, &p->soft.tm.rate);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM number of queues (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_NB_QUEUES,
			&get_uint32, &p->soft.tm.nb_queues);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM queue size 0 .. 3 (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE0) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE0,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[0] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE1) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE1,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[1] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE2) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE2,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[2] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_QSIZE3) == 1) {
		uint32_t qsize;

		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_QSIZE3,
			&get_uint32, &qsize);
		if (ret < 0)
			goto out_free;
		p->soft.tm.qsize[3] = (uint16_t)qsize;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM enqueue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_ENQ_BSZ,
			&get_uint32, &p->soft.tm.enq_bsz);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* SOFT: TM dequeue burst size (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_SOFT_TM_DEQ_BSZ,
			&get_uint32, &p->soft.tm.deq_bsz);
		if (ret < 0)
			goto out_free;
		p->soft.flags |= PMD_FEATURE_TM;
	}

	/* HARD: name (mandatory) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_NAME) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_NAME,
			&get_string, &p->hard.name);
		if (ret < 0)
			goto out_free;
	} else {
		ret = -EINVAL;
		goto out_free;
	}

	/* HARD: tx_queue_id (optional) */
	if (rte_kvargs_count(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID) == 1) {
		ret = rte_kvargs_process(kvlist, PMD_PARAM_HARD_TX_QUEUE_ID,
			&get_uint32, &p->hard.tx_queue_id);
		if (ret < 0)
			goto out_free;
	}

	ret = 0;

out_free:
	rte_kvargs_free(kvlist);
	return ret;
}
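
/*
 * Illustrative example (not part of the driver): with the devargs string
 * "hard_name=net_ixgbe0,soft_tm_rate=1250000000,soft_tm_qsize0=64", parsing
 * sets p->hard.name, p->soft.tm.rate and p->soft.tm.qsize[0], and implicitly
 * turns on PMD_FEATURE_TM because a TM-specific argument was given, even
 * though "soft_tm=on" was not passed explicitly. The device name
 * "net_ixgbe0" is a placeholder.
 */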
static int
pmd_probe(struct rte_vdev_device *vdev)
{
	struct pmd_params p;
	const char *params;
	struct rte_eth_dev_info hard_info;
	uint32_t hard_speed;
	uint16_t hard_port_id;
	int numa_node, status;
	void *dev_private;

	RTE_LOG(INFO, PMD,
		"Probing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Parse input arguments */
	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;
	status = pmd_parse_args(&p, rte_vdev_device_name(vdev), params);
	if (status)
		return status;

	/* Check input arguments */
	if (rte_eth_dev_get_port_by_name(p.hard.name, &hard_port_id))
		return -EINVAL;
	rte_eth_dev_info_get(hard_port_id, &hard_info);
	hard_speed = eth_dev_speed_max_mbps(hard_info.speed_capa);
	numa_node = rte_eth_dev_socket_id(hard_port_id);

	if (p.hard.tx_queue_id >= hard_info.max_tx_queues)
		return -EINVAL;

	if (p.soft.flags & PMD_FEATURE_TM) {
		status = tm_params_check(&p, hard_speed);
		if (status)
			return status;
	}

	/* Allocate and initialize soft ethdev private data */
	dev_private = pmd_init(&p, numa_node);
	if (dev_private == NULL)
		return -ENOMEM;

	/* Register soft ethdev */
	RTE_LOG(INFO, PMD,
		"Creating soft ethdev \"%s\" for hard ethdev \"%s\"\n",
		p.soft.name, p.hard.name);
	status = pmd_ethdev_register(vdev, &p, dev_private);
	if (status) {
		pmd_free(dev_private);
		return status;
	}

	return 0;
}
static int
pmd_remove(struct rte_vdev_device *vdev)
{
	struct rte_eth_dev *dev = NULL;
	struct pmd_internals *p;

	if (!vdev)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing device \"%s\"\n",
		rte_vdev_device_name(vdev));

	/* Find the ethdev entry */
	dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
	if (dev == NULL)
		return -ENODEV;
	p = dev->data->dev_private;

	/* Free device data structures*/
	pmd_free(p);
	rte_free(dev->data);
	rte_eth_dev_release_port(dev);

	return 0;
}
static struct rte_vdev_driver pmd_softnic_drv = {
	.probe = pmd_probe,
	.remove = pmd_remove,
};
RTE_PMD_REGISTER_VDEV(net_softnic, pmd_softnic_drv);

RTE_PMD_REGISTER_PARAM_STRING(net_softnic,
	PMD_PARAM_SOFT_TM "=on|off "
	PMD_PARAM_SOFT_TM_RATE "=<int> "
	PMD_PARAM_SOFT_TM_NB_QUEUES "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE0 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE1 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE2 "=<int> "
	PMD_PARAM_SOFT_TM_QSIZE3 "=<int> "
	PMD_PARAM_SOFT_TM_ENQ_BSZ "=<int> "
	PMD_PARAM_SOFT_TM_DEQ_BSZ "=<int> "
	PMD_PARAM_HARD_NAME "=<string> "
	PMD_PARAM_HARD_TX_QUEUE_ID "=<int>");
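
/*
 * Illustrative end-to-end example (not part of the driver): with the
 * parameter string registered above, a soft NIC with the built-in traffic
 * manager enabled could be created at startup with something like:
 *
 *   testpmd ... --vdev 'net_softnic0,hard_name=0000:02:00.1,soft_tm=on,
 *       soft_tm_rate=1250000000,hard_tx_queue_id=0' ...
 *
 * The application then configures and starts the soft port like any other
 * ethdev and dedicates a core to rte_pmd_softnic_run(). The PCI address,
 * rate and queue id above are placeholders.
 */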