/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_cycles.h>
#include <rte_byteorder.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_meter.h>
#include <rte_eth_softnic.h>
#include <rte_tm.h>

#include "testpmd.h"

#define SUBPORT_NODES_PER_PORT		1
#define PIPE_NODES_PER_SUBPORT		4096
#define TC_NODES_PER_PIPE		4
#define QUEUE_NODES_PER_TC		4

#define NUM_PIPE_NODES						\
	(SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT)

#define NUM_TC_NODES						\
	(NUM_PIPE_NODES * TC_NODES_PER_PIPE)

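/* With the defaults above, the hierarchy holds 1 port node, 1 subport
 * node, 4096 pipe nodes, 4096 * 4 = 16384 TC nodes and
 * 16384 * 4 = 65536 queue nodes.
 */
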
#define ROOT_NODE_ID				1000000
#define SUBPORT_NODES_START_ID			900000
#define PIPE_NODES_START_ID			800000
#define TC_NODES_START_ID			700000

#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)

#define BYTES_IN_MBPS		(1000 * 1000 / 8)
#define TOKEN_BUCKET_SIZE	1000000

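/* BYTES_IN_MBPS converts the Mbps link speed reported by
 * rte_eth_link_get() into a bytes/second shaper rate; the token bucket
 * size below is likewise expressed in bytes.
 */
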
/* TM Hierarchy Levels */
enum tm_hierarchy_level {
	TM_NODE_LEVEL_PORT = 0,
	TM_NODE_LEVEL_SUBPORT,
	TM_NODE_LEVEL_PIPE,
	TM_NODE_LEVEL_TC,
	TM_NODE_LEVEL_QUEUE,
	TM_NODE_LEVEL_MAX,
};

struct tm_hierarchy {
	/* TM Hierarchy Nodes */
	uint32_t root_node_id;
	uint32_t subport_node_id[SUBPORT_NODES_PER_PORT];
	uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT];
	uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE];
	uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC];

	/* TM Hierarchy Nodes Shaper Rates */
	uint32_t root_node_shaper_rate;
	uint32_t subport_node_shaper_rate;
	uint32_t pipe_node_shaper_rate;
	uint32_t tc_node_shaper_rate;
	uint32_t tc_node_shared_shaper_rate;

	uint32_t n_shapers;
};

/* Read a 64-bit big-endian slab at byte offset slab_pos and extract the
 * field selected by slab_mask, right-aligned by slab_shr.
 */
#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr)	\
({								\
	uint64_t slab = *((uint64_t *) &byte_array[slab_pos]);	\
	uint64_t val =						\
		(rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr; \
	val;							\
})

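/* Layout of the 64-bit scheduler word built below, from the LSB:
 * queue in bits 0..1, traffic class in bits 2..3, color in bits 4..5,
 * subport in bits 16..31, pipe in bits 32..63.
 */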
#define RTE_SCHED_PORT_HIERARCHY(subport, pipe,			\
	traffic_class, queue, color)				\
	((((uint64_t) (queue)) & 0x3) |				\
	((((uint64_t) (traffic_class)) & 0x3) << 2) |		\
	((((uint64_t) (color)) & 0x3) << 4) |			\
	((((uint64_t) (subport)) & 0xFFFF) << 16) |		\
	((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))

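/* Classify each mbuf by reading the subport, pipe and DSCP fields at the
 * offsets configured in softport_tm_pktfield_setup(), then store the
 * resulting scheduler word in mbuf->hash.sched. The main loop is
 * unrolled four packets at a time; the tail loop handles the remainder.
 */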
static void
pkt_metadata_set(struct rte_port *p, struct rte_mbuf **pkts,
	uint32_t n_pkts)
{
	struct softnic_port_tm *tm = &p->softport.tm;
	uint32_t i;

	for (i = 0; i < (n_pkts & (~0x3)); i += 4) {
		struct rte_mbuf *pkt0 = pkts[i];
		struct rte_mbuf *pkt1 = pkts[i + 1];
		struct rte_mbuf *pkt2 = pkts[i + 2];
		struct rte_mbuf *pkt3 = pkts[i + 3];

		uint8_t *pkt0_data = rte_pktmbuf_mtod(pkt0, uint8_t *);
		uint8_t *pkt1_data = rte_pktmbuf_mtod(pkt1, uint8_t *);
		uint8_t *pkt2_data = rte_pktmbuf_mtod(pkt2, uint8_t *);
		uint8_t *pkt3_data = rte_pktmbuf_mtod(pkt3, uint8_t *);

		uint64_t pkt0_subport = BITFIELD(pkt0_data,
					tm->tm_pktfield0_slabpos,
					tm->tm_pktfield0_slabmask,
					tm->tm_pktfield0_slabshr);
		uint64_t pkt0_pipe = BITFIELD(pkt0_data,
					tm->tm_pktfield1_slabpos,
					tm->tm_pktfield1_slabmask,
					tm->tm_pktfield1_slabshr);
		uint64_t pkt0_dscp = BITFIELD(pkt0_data,
					tm->tm_pktfield2_slabpos,
					tm->tm_pktfield2_slabmask,
					tm->tm_pktfield2_slabshr);
		uint32_t pkt0_tc = tm->tm_tc_table[pkt0_dscp & 0x3F] >> 2;
		uint32_t pkt0_tc_q = tm->tm_tc_table[pkt0_dscp & 0x3F] & 0x3;

		uint64_t pkt1_subport = BITFIELD(pkt1_data,
					tm->tm_pktfield0_slabpos,
					tm->tm_pktfield0_slabmask,
					tm->tm_pktfield0_slabshr);
		uint64_t pkt1_pipe = BITFIELD(pkt1_data,
					tm->tm_pktfield1_slabpos,
					tm->tm_pktfield1_slabmask,
					tm->tm_pktfield1_slabshr);
		uint64_t pkt1_dscp = BITFIELD(pkt1_data,
					tm->tm_pktfield2_slabpos,
					tm->tm_pktfield2_slabmask,
					tm->tm_pktfield2_slabshr);
		uint32_t pkt1_tc = tm->tm_tc_table[pkt1_dscp & 0x3F] >> 2;
		uint32_t pkt1_tc_q = tm->tm_tc_table[pkt1_dscp & 0x3F] & 0x3;

		uint64_t pkt2_subport = BITFIELD(pkt2_data,
					tm->tm_pktfield0_slabpos,
					tm->tm_pktfield0_slabmask,
					tm->tm_pktfield0_slabshr);
		uint64_t pkt2_pipe = BITFIELD(pkt2_data,
					tm->tm_pktfield1_slabpos,
					tm->tm_pktfield1_slabmask,
					tm->tm_pktfield1_slabshr);
		uint64_t pkt2_dscp = BITFIELD(pkt2_data,
					tm->tm_pktfield2_slabpos,
					tm->tm_pktfield2_slabmask,
					tm->tm_pktfield2_slabshr);
		uint32_t pkt2_tc = tm->tm_tc_table[pkt2_dscp & 0x3F] >> 2;
		uint32_t pkt2_tc_q = tm->tm_tc_table[pkt2_dscp & 0x3F] & 0x3;

		uint64_t pkt3_subport = BITFIELD(pkt3_data,
					tm->tm_pktfield0_slabpos,
					tm->tm_pktfield0_slabmask,
					tm->tm_pktfield0_slabshr);
		uint64_t pkt3_pipe = BITFIELD(pkt3_data,
					tm->tm_pktfield1_slabpos,
					tm->tm_pktfield1_slabmask,
					tm->tm_pktfield1_slabshr);
		uint64_t pkt3_dscp = BITFIELD(pkt3_data,
					tm->tm_pktfield2_slabpos,
					tm->tm_pktfield2_slabmask,
					tm->tm_pktfield2_slabshr);
		uint32_t pkt3_tc = tm->tm_tc_table[pkt3_dscp & 0x3F] >> 2;
		uint32_t pkt3_tc_q = tm->tm_tc_table[pkt3_dscp & 0x3F] & 0x3;

		uint64_t pkt0_sched = RTE_SCHED_PORT_HIERARCHY(pkt0_subport,
						pkt0_pipe,
						pkt0_tc,
						pkt0_tc_q,
						0);
		uint64_t pkt1_sched = RTE_SCHED_PORT_HIERARCHY(pkt1_subport,
						pkt1_pipe,
						pkt1_tc,
						pkt1_tc_q,
						0);
		uint64_t pkt2_sched = RTE_SCHED_PORT_HIERARCHY(pkt2_subport,
						pkt2_pipe,
						pkt2_tc,
						pkt2_tc_q,
						0);
		uint64_t pkt3_sched = RTE_SCHED_PORT_HIERARCHY(pkt3_subport,
						pkt3_pipe,
						pkt3_tc,
						pkt3_tc_q,
						0);

		pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
		pkt0->hash.sched.hi = pkt0_sched >> 32;
		pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
		pkt1->hash.sched.hi = pkt1_sched >> 32;
		pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
		pkt2->hash.sched.hi = pkt2_sched >> 32;
		pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
		pkt3->hash.sched.hi = pkt3_sched >> 32;
	}

	for (; i < n_pkts; i++) {
		struct rte_mbuf *pkt = pkts[i];

		uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);

		uint64_t pkt_subport = BITFIELD(pkt_data,
					tm->tm_pktfield0_slabpos,
					tm->tm_pktfield0_slabmask,
					tm->tm_pktfield0_slabshr);
		uint64_t pkt_pipe = BITFIELD(pkt_data,
					tm->tm_pktfield1_slabpos,
					tm->tm_pktfield1_slabmask,
					tm->tm_pktfield1_slabshr);
		uint64_t pkt_dscp = BITFIELD(pkt_data,
					tm->tm_pktfield2_slabpos,
					tm->tm_pktfield2_slabmask,
					tm->tm_pktfield2_slabshr);
		uint32_t pkt_tc = tm->tm_tc_table[pkt_dscp & 0x3F] >> 2;
		uint32_t pkt_tc_q = tm->tm_tc_table[pkt_dscp & 0x3F] & 0x3;

		uint64_t pkt_sched = RTE_SCHED_PORT_HIERARCHY(pkt_subport,
						pkt_pipe,
						pkt_tc,
						pkt_tc_q,
						0);

		pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
		pkt->hash.sched.hi = pkt_sched >> 32;
	}
}

/*
 * Soft port packet forward
 */
static void
softport_packet_fwd(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *rte_tx_port = &ports[fs->tx_port];
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* Packets Receive */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
			pkts_burst, nb_pkt_per_burst);
	fs->rx_packets += nb_rx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif

	if (rte_tx_port->softnic_enable) {
		/* Set packet metadata if tm flag enabled */
		if (rte_tx_port->softport.tm_flag)
			pkt_metadata_set(rte_tx_port, pkts_burst, nb_rx);

		/* Softport run */
		rte_pmd_softnic_run(fs->tx_port);
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
			pkts_burst, nb_rx);

	/* Retry if necessary */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif

	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

static void
set_tm_hierarchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
{
	struct rte_eth_link link_params;
	uint64_t tm_port_rate;

	memset(&link_params, 0, sizeof(link_params));

	rte_eth_link_get(port_id, &link_params);
	tm_port_rate = (uint64_t)link_params.link_speed * BYTES_IN_MBPS;

	if (tm_port_rate > UINT32_MAX)
		tm_port_rate = UINT32_MAX;

	/* Set tm hierarchy shapers rate */
	h->root_node_shaper_rate = tm_port_rate;
	h->subport_node_shaper_rate =
		tm_port_rate / SUBPORT_NODES_PER_PORT;
	h->pipe_node_shaper_rate =
		h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT;
	h->tc_node_shaper_rate = h->pipe_node_shaper_rate;
	h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate;
}

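/* Worked example: a 10 Gbps link gives tm_port_rate = 10000 *
 * BYTES_IN_MBPS = 1.25e9 bytes/sec (below the UINT32_MAX clamp). With
 * 1 subport and 4096 pipes per subport, each pipe (and each TC, which
 * inherits the pipe rate) is shaped to 1.25e9 / 4096, roughly 305 KB/s.
 */
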
static int
softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h,
	struct rte_tm_error *error)
{
	struct rte_tm_node_params rnp;
	struct rte_tm_shaper_params rsp;
	uint32_t priority, weight, level_id, shaper_profile_id;

	memset(&rsp, 0, sizeof(struct rte_tm_shaper_params));
	memset(&rnp, 0, sizeof(struct rte_tm_node_params));

	/* Shaper profile Parameters */
	rsp.peak.rate = h->root_node_shaper_rate;
	rsp.peak.size = TOKEN_BUCKET_SIZE;
	rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
	shaper_profile_id = 0;

	if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
		&rsp, error)) {
		printf("%s ERROR(%d)-%s!(shaper_id %u)\n",
			__func__, error->type, error->message,
			shaper_profile_id);
		return -1;
	}

	/* Root Node Parameters */
	h->root_node_id = ROOT_NODE_ID;
	weight = 1;
	priority = 0;
	level_id = TM_NODE_LEVEL_PORT;
	rnp.shaper_profile_id = shaper_profile_id;
	rnp.nonleaf.n_sp_priorities = 1;
	rnp.stats_mask = STATS_MASK_DEFAULT;

	/* Add Node to TM Hierarchy */
	if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL,
		priority, weight, level_id, &rnp, error)) {
		printf("%s ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n",
			__func__, error->type, error->message,
			h->root_node_id, RTE_TM_NODE_ID_NULL,
			level_id);
		return -1;
	}
	/* Update: the root consumed shaper profile 0 */
	h->n_shapers++;

	printf("  Root node added (Start id %u, Count %u, level %u)\n",
		h->root_node_id, 1, level_id);

	return 0;
}

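/* Level 1: one subport node per port, attached under the root node,
 * each with a private shaper capped at the per-subport rate.
 */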
static int
softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
	struct rte_tm_error *error)
{
	uint32_t subport_parent_node_id, subport_node_id = 0;
	struct rte_tm_node_params snp;
	struct rte_tm_shaper_params ssp;
	uint32_t priority, weight, level_id, shaper_profile_id;
	uint32_t i;

	memset(&ssp, 0, sizeof(struct rte_tm_shaper_params));
	memset(&snp, 0, sizeof(struct rte_tm_node_params));

	shaper_profile_id = h->n_shapers;

	/* Add Shaper Profile to TM Hierarchy */
	for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
		ssp.peak.rate = h->subport_node_shaper_rate;
		ssp.peak.size = TOKEN_BUCKET_SIZE;
		ssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

		if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
			&ssp, error)) {
			printf("%s ERROR(%d)-%s!(shaper_id %u)\n",
				__func__, error->type, error->message,
				shaper_profile_id);
			return -1;
		}

		/* Node Parameters */
		h->subport_node_id[i] = SUBPORT_NODES_START_ID + i;
		subport_parent_node_id = h->root_node_id;
		weight = 1;
		priority = 0;
		level_id = TM_NODE_LEVEL_SUBPORT;
		snp.shaper_profile_id = shaper_profile_id;
		snp.nonleaf.n_sp_priorities = 1;
		snp.stats_mask = STATS_MASK_DEFAULT;

		/* Add Node to TM Hierarchy */
		if (rte_tm_node_add(port_id,
				h->subport_node_id[i],
				subport_parent_node_id,
				priority, weight,
				level_id,
				&snp,
				error)) {
			printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n",
					__func__,
					error->type,
					error->message,
					h->subport_node_id[i],
					subport_parent_node_id,
					level_id);
			return -1;
		}
		shaper_profile_id++;
	}

	/* Update */
	h->n_shapers = shaper_profile_id;

	printf("  Subport nodes added (Start id %u, Count %u, level %u)\n",
		h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id);

	return 0;
}

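/* Level 2: 4096 pipe nodes per subport, each with a private shaper at
 * the per-pipe rate and 4 strict priority levels for the traffic
 * classes underneath.
 */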
static int
softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
	struct rte_tm_error *error)
{
	uint32_t pipe_parent_node_id;
	struct rte_tm_node_params pnp;
	struct rte_tm_shaper_params psp;
	uint32_t priority, weight, level_id, shaper_profile_id;
	uint32_t i, j;

	memset(&psp, 0, sizeof(struct rte_tm_shaper_params));
	memset(&pnp, 0, sizeof(struct rte_tm_node_params));

	shaper_profile_id = h->n_shapers;

	/* Shaper Profile Parameters */
	psp.peak.rate = h->pipe_node_shaper_rate;
	psp.peak.size = TOKEN_BUCKET_SIZE;
	psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	/* Pipe Node Parameters */
	weight = 1;
	priority = 0;
	level_id = TM_NODE_LEVEL_PIPE;
	pnp.nonleaf.n_sp_priorities = 4;
	pnp.stats_mask = STATS_MASK_DEFAULT;

	/* Add Shaper Profiles and Nodes to TM Hierarchy */
	for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
		for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
			if (rte_tm_shaper_profile_add(port_id,
				shaper_profile_id, &psp, error)) {
				printf("%s ERROR(%d)-%s!(shaper_id %u)\n",
					__func__, error->type, error->message,
					shaper_profile_id);
				return -1;
			}
			pnp.shaper_profile_id = shaper_profile_id;
			pipe_parent_node_id = h->subport_node_id[i];
			h->pipe_node_id[i][j] = PIPE_NODES_START_ID +
				(i * PIPE_NODES_PER_SUBPORT) + j;

			if (rte_tm_node_add(port_id,
					h->pipe_node_id[i][j],
					pipe_parent_node_id,
					priority, weight, level_id,
					&pnp,
					error)) {
				printf("%s ERROR(%d)-%s!(node %u,parent %u)\n",
					__func__,
					error->type,
					error->message,
					h->pipe_node_id[i][j],
					pipe_parent_node_id);
				return -1;
			}
			shaper_profile_id++;
		}
	}

	/* Update */
	h->n_shapers = shaper_profile_id;

	printf("  Pipe nodes added (Start id %u, Count %u, level %u)\n",
		h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id);

	return 0;
}

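/* Level 3: 4 traffic class nodes per pipe. Each TC node carries a
 * private shaper at the pipe rate plus one of 4 shared shapers (one per
 * TC index), which caps the aggregate of a given TC across all pipes at
 * the subport rate.
 */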
static int
softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
	struct rte_tm_error *error)
{
	uint32_t tc_parent_node_id;
	struct rte_tm_node_params tnp;
	struct rte_tm_shaper_params tsp, tssp;
	uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE];
	uint32_t priority, weight, level_id, shaper_profile_id;
	uint32_t pos, n_tc_nodes, i, j, k;

	memset(&tsp, 0, sizeof(struct rte_tm_shaper_params));
	memset(&tssp, 0, sizeof(struct rte_tm_shaper_params));
	memset(&tnp, 0, sizeof(struct rte_tm_node_params));

	shaper_profile_id = h->n_shapers;

	/* Private Shaper Profile (TC) Parameters */
	tsp.peak.rate = h->tc_node_shaper_rate;
	tsp.peak.size = TOKEN_BUCKET_SIZE;
	tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	/* Shared Shaper Profile (TC) Parameters */
	tssp.peak.rate = h->tc_node_shared_shaper_rate;
	tssp.peak.size = TOKEN_BUCKET_SIZE;
	tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;

	/* TC Node Parameters */
	weight = 1;
	priority = 0;
	level_id = TM_NODE_LEVEL_TC;
	tnp.n_shared_shapers = 1;
	tnp.nonleaf.n_sp_priorities = 1;
	tnp.stats_mask = STATS_MASK_DEFAULT;

	/* Add Shared Shaper Profiles to TM Hierarchy */
	for (i = 0; i < TC_NODES_PER_PIPE; i++) {
		shared_shaper_profile_id[i] = shaper_profile_id;

		if (rte_tm_shaper_profile_add(port_id,
			shared_shaper_profile_id[i], &tssp, error)) {
			printf("%s ERROR(%d)-%s!(Shared shaper profile id %u)\n",
				__func__, error->type, error->message,
				shared_shaper_profile_id[i]);
			return -1;
		}
		if (rte_tm_shared_shaper_add_update(port_id, i,
			shared_shaper_profile_id[i], error)) {
			printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n",
				__func__, error->type, error->message, i);
			return -1;
		}
		shaper_profile_id++;
	}

	/* Add Shaper Profiles and Nodes to TM Hierarchy */
	n_tc_nodes = 0;
	for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
		for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
			for (k = 0; k < TC_NODES_PER_PIPE; k++) {
				tc_parent_node_id = h->pipe_node_id[i][j];
				tnp.shared_shaper_id =
					(uint32_t *)calloc(1, sizeof(uint32_t));
				if (tnp.shared_shaper_id == NULL) {
					printf("Shared shaper mem alloc err\n");
					return -1;
				}
				tnp.shared_shaper_id[0] = k;
				pos = j + (i * PIPE_NODES_PER_SUBPORT);
				h->tc_node_id[pos][k] =
					TC_NODES_START_ID + n_tc_nodes;

				if (rte_tm_shaper_profile_add(port_id,
					shaper_profile_id, &tsp, error)) {
					printf("%s ERROR(%d)-%s!(shaper %u)\n",
						__func__, error->type,
						error->message,
						shaper_profile_id);
					return -1;
				}
				tnp.shaper_profile_id = shaper_profile_id;
				if (rte_tm_node_add(port_id,
						h->tc_node_id[pos][k],
						tc_parent_node_id,
						priority, weight,
						level_id,
						&tnp, error)) {
					printf("%s ERROR(%d)-%s!(node id %u)\n",
						__func__,
						error->type,
						error->message,
						h->tc_node_id[pos][k]);
					return -1;
				}
				shaper_profile_id++;
				n_tc_nodes++;
			}
		}
	}

	/* Update */
	h->n_shapers = shaper_profile_id;

	printf("  TC nodes added (Start id %u, Count %u, level %u)\n",
		h->tc_node_id[0][0], n_tc_nodes, level_id);

	return 0;
}

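/* Level 4 (leaf): 4 queue nodes per TC node. Queues carry no private
 * shaper (RTE_TM_SHAPER_PROFILE_ID_NONE) and use tail drop congestion
 * management; their stats mask additionally counts queued packets.
 */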
static int
softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h,
	struct rte_tm_error *error)
{
	uint32_t queue_parent_node_id;
	struct rte_tm_node_params qnp;
	uint32_t priority, weight, level_id, pos;
	uint32_t n_queue_nodes, i, j, k;

	memset(&qnp, 0, sizeof(struct rte_tm_node_params));

	/* Queue Node Parameters */
	priority = 0;
	weight = 1;
	level_id = TM_NODE_LEVEL_QUEUE;
	qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
	qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
	qnp.stats_mask = STATS_MASK_QUEUE;

	/* Add Queue Nodes to TM Hierarchy */
	n_queue_nodes = 0;
	for (i = 0; i < NUM_PIPE_NODES; i++) {
		for (j = 0; j < TC_NODES_PER_PIPE; j++) {
			queue_parent_node_id = h->tc_node_id[i][j];
			for (k = 0; k < QUEUE_NODES_PER_TC; k++) {
				pos = j + (i * TC_NODES_PER_PIPE);
				h->queue_node_id[pos][k] = n_queue_nodes;
				if (rte_tm_node_add(port_id,
						h->queue_node_id[pos][k],
						queue_parent_node_id,
						priority,
						weight,
						level_id,
						&qnp, error)) {
					printf("%s ERROR(%d)-%s!(node %u)\n",
						__func__,
						error->type,
						error->message,
						h->queue_node_id[pos][k]);
					return -1;
				}
				n_queue_nodes++;
			}
		}
	}

	printf("  Queue nodes added (Start id %u, Count %u, level %u)\n",
		h->queue_node_id[0][0], n_queue_nodes, level_id);

	return 0;
}

/*
 * TM Packet Field Setup
 */
static void
softport_tm_pktfield_setup(portid_t port_id)
{
	struct rte_port *p = &ports[port_id];
	uint64_t pktfield0_mask = 0;
	uint64_t pktfield1_mask = 0x0000000FFF000000LLU;
	uint64_t pktfield2_mask = 0x00000000000000FCLLU;

	p->softport.tm = (struct softnic_port_tm) {
		.n_subports_per_port = SUBPORT_NODES_PER_PORT,
		.n_pipes_per_subport = PIPE_NODES_PER_SUBPORT,

		/* Packet field to identify subport.
		 *
		 * Default configuration assumes only one subport, thus
		 * the subport ID is hardcoded to 0.
		 */
		.tm_pktfield0_slabpos = 0,
		.tm_pktfield0_slabmask = pktfield0_mask,
		/* Shift of 0: __builtin_ctzll() is undefined for a zero
		 * mask, and the masked value is 0 regardless of the shift.
		 */
		.tm_pktfield0_slabshr = 0,

		/* Packet field to identify pipe.
		 *
		 * Default value assumes Ethernet/IPv4/UDP packets,
		 * UDP payload bits 12 .. 23
		 */
		.tm_pktfield1_slabpos = 40,
		.tm_pktfield1_slabmask = pktfield1_mask,
		.tm_pktfield1_slabshr =
			__builtin_ctzll(pktfield1_mask),

		/* Packet field used as index into TC translation table
		 * to identify the traffic class and queue.
		 *
		 * Default value assumes Ethernet/IPv4 packets, IPv4
		 * DSCP field.
		 */
		.tm_pktfield2_slabpos = 8,
		.tm_pktfield2_slabmask = pktfield2_mask,
		.tm_pktfield2_slabshr =
			__builtin_ctzll(pktfield2_mask),

		.tm_tc_table = {
			0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
			0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
			0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
			0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
		}, /**< TC translation table */
	};
}

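/* Example: an IPv4 packet with DSCP 14 indexes tm_tc_table[14] = 14,
 * which pkt_metadata_set() splits into traffic class 14 >> 2 = 3 and
 * queue 14 & 0x3 = 2.
 */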
static int
softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
{
	struct tm_hierarchy h;
	int status;

	memset(&h, 0, sizeof(struct tm_hierarchy));

	/* TM hierarchy shapers rate */
	set_tm_hierarchy_nodes_shaper_rate(port_id, &h);

	/* Add root node (level 0) */
	status = softport_tm_root_node_add(port_id, &h, error);
	if (status)
		return status;

	/* Add subport node (level 1) */
	status = softport_tm_subport_node_add(port_id, &h, error);
	if (status)
		return status;

	/* Add pipe nodes (level 2) */
	status = softport_tm_pipe_node_add(port_id, &h, error);
	if (status)
		return status;

	/* Add traffic class nodes (level 3) */
	status = softport_tm_tc_node_add(port_id, &h, error);
	if (status)
		return status;

	/* Add queue nodes (level 4) */
	status = softport_tm_queue_node_add(port_id, &h, error);
	if (status)
		return status;

	/* TM packet fields setup */
	softport_tm_pktfield_setup(port_id);

	return 0;
}

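/* port_fwd_begin callback for the "tm" engine: when the soft port
 * exposes TM and the default hierarchy is requested, stop the port,
 * build and commit the 5-level hierarchy, then restart the port.
 */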
static void
softport_tm_begin(portid_t pi)
{
	struct rte_port *port = &ports[pi];

	/* Soft port TM flag */
	if (port->softport.tm_flag == 1) {
		printf("\n\n  TM feature available on port %u\n", pi);

		/* Soft port TM hierarchy configuration */
		if ((port->softport.tm.hierarchy_config == 0) &&
			(port->softport.tm.default_hierarchy_enable == 1)) {
			struct rte_tm_error error;
			int status;

			/* Stop port */
			rte_eth_dev_stop(pi);

			/* TM hierarchy specification */
			status = softport_tm_hierarchy_specify(pi, &error);
			if (status) {
				printf("  TM Hierarchy built error(%d) - %s\n",
					error.type, error.message);
				return;
			}
			printf("\n  TM Hierarchy Specified!\n\v");

			/* TM hierarchy commit */
			status = rte_tm_hierarchy_commit(pi, 0, &error);
			if (status) {
				printf("  Hierarchy commit error(%d) - %s\n",
					error.type, error.message);
				return;
			}
			printf("  Hierarchy Committed (port %u)!\n", pi);
			port->softport.tm.hierarchy_config = 1;

			/* Start port */
			status = rte_eth_dev_start(pi);
			if (status) {
				printf("\n  Port %u start error!\n", pi);
				return;
			}
			printf("\n  Port %u started!\n", pi);
			return;
		}
	} else
		printf("\n  TM feature not available on port %u\n", pi);
}

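/* The "tm" engine builds the default TM hierarchy when forwarding
 * starts (port_fwd_begin); "tm-bypass" shares the same packet_fwd path
 * but skips the hierarchy setup.
 */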
struct fwd_engine softnic_tm_engine = {
	.fwd_mode_name  = "tm",
	.port_fwd_begin = softport_tm_begin,
	.port_fwd_end   = NULL,
	.packet_fwd     = softport_packet_fwd,
};

struct fwd_engine softnic_tm_bypass_engine = {
	.fwd_mode_name  = "tm-bypass",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = softport_packet_fwd,
};