4 * Copyright(c) 2017 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_cycles.h>
38 #include <rte_ethdev.h>
40 #include <rte_meter.h>
41 #include <rte_eth_softnic.h>
/* Default TM hierarchy geometry: 1 subport per port, 4096 pipes per
 * subport, 4 traffic classes per pipe, 4 queues per traffic class.
 */
#define SUBPORT_NODES_PER_PORT		1
#define PIPE_NODES_PER_SUBPORT		4096
#define TC_NODES_PER_PIPE		4
#define QUEUE_NODES_PER_TC		4

/* Total number of pipe nodes in the hierarchy. */
#define NUM_PIPE_NODES						\
	(SUBPORT_NODES_PER_PORT * PIPE_NODES_PER_SUBPORT)

/* Total number of traffic class nodes in the hierarchy. */
#define NUM_TC_NODES						\
	(NUM_PIPE_NODES * TC_NODES_PER_PIPE)

/* Non-leaf node id ranges: each level gets a disjoint base so that node
 * ids never collide across levels (queue nodes start at 0).
 */
#define ROOT_NODE_ID			1000000
#define SUBPORT_NODES_START_ID		900000
#define PIPE_NODES_START_ID		800000
#define TC_NODES_START_ID		700000

/* Stats collected on every non-leaf node. */
#define STATS_MASK_DEFAULT					\
	(RTE_TM_STATS_N_PKTS |					\
	RTE_TM_STATS_N_BYTES |					\
	RTE_TM_STATS_N_PKTS_GREEN_DROPPED |			\
	RTE_TM_STATS_N_BYTES_GREEN_DROPPED)

/* Queue (leaf) nodes additionally report the queued packet count. */
#define STATS_MASK_QUEUE					\
	(STATS_MASK_DEFAULT |					\
	RTE_TM_STATS_N_PKTS_QUEUED)

/* Link speed is reported in Mbps; this converts Mbps to bytes/second. */
#define BYTES_IN_MBPS			(1000 * 1000 / 8)
#define TOKEN_BUCKET_SIZE		1000000
75 /* TM Hierarchy Levels */
76 enum tm_hierarchy_level {
77 TM_NODE_LEVEL_PORT = 0,
78 TM_NODE_LEVEL_SUBPORT,
87 uint32_t root_node_id;
88 uint32_t subport_node_id[SUBPORT_NODES_PER_PORT];
89 uint32_t pipe_node_id[SUBPORT_NODES_PER_PORT][PIPE_NODES_PER_SUBPORT];
90 uint32_t tc_node_id[NUM_PIPE_NODES][TC_NODES_PER_PIPE];
91 uint32_t queue_node_id[NUM_TC_NODES][QUEUE_NODES_PER_TC];
93 /* TM Hierarchy Nodes Shaper Rates */
94 uint32_t root_node_shaper_rate;
95 uint32_t subport_node_shaper_rate;
96 uint32_t pipe_node_shaper_rate;
97 uint32_t tc_node_shaper_rate;
98 uint32_t tc_node_shared_shaper_rate;
/* Extract a bit field from a packet: load the 8-byte slab at byte offset
 * slab_pos, convert from network (big-endian) to host byte order, then
 * mask and shift the field into place.
 * NOTE(review): the uint64_t load assumes the slab offset is suitably
 * aligned for the target architecture — confirm for strict-alignment CPUs.
 */
#define BITFIELD(byte_array, slab_pos, slab_mask, slab_shr)	\
({								\
	uint64_t slab = *((uint64_t *) &byte_array[slab_pos]);	\
	(rte_be_to_cpu_64(slab) & slab_mask) >> slab_shr;	\
})

/* Pack the scheduler hierarchy coordinates into the 64-bit rte_sched
 * metadata word: queue in bits 0-1, traffic class in bits 2-3, color in
 * bits 4-5, subport in bits 16-31, pipe in bits 32-63.
 */
#define RTE_SCHED_PORT_HIERARCHY(subport, pipe,			\
	traffic_class, queue, color)				\
	((((uint64_t) (queue)) & 0x3) |				\
	((((uint64_t) (traffic_class)) & 0x3) << 2) |		\
	((((uint64_t) (color)) & 0x3) << 4) |			\
	((((uint64_t) (subport)) & 0xFFFF) << 16) |		\
	((((uint64_t) (pipe)) & 0xFFFFFFFF) << 32))
121 pkt_metadata_set(struct rte_port *p, struct rte_mbuf **pkts,
124 struct softnic_port_tm *tm = &p->softport.tm;
127 for (i = 0; i < (n_pkts & (~0x3)); i += 4) {
128 struct rte_mbuf *pkt0 = pkts[i];
129 struct rte_mbuf *pkt1 = pkts[i + 1];
130 struct rte_mbuf *pkt2 = pkts[i + 2];
131 struct rte_mbuf *pkt3 = pkts[i + 3];
133 uint8_t *pkt0_data = rte_pktmbuf_mtod(pkt0, uint8_t *);
134 uint8_t *pkt1_data = rte_pktmbuf_mtod(pkt1, uint8_t *);
135 uint8_t *pkt2_data = rte_pktmbuf_mtod(pkt2, uint8_t *);
136 uint8_t *pkt3_data = rte_pktmbuf_mtod(pkt3, uint8_t *);
138 uint64_t pkt0_subport = BITFIELD(pkt0_data,
139 tm->tm_pktfield0_slabpos,
140 tm->tm_pktfield0_slabmask,
141 tm->tm_pktfield0_slabshr);
142 uint64_t pkt0_pipe = BITFIELD(pkt0_data,
143 tm->tm_pktfield1_slabpos,
144 tm->tm_pktfield1_slabmask,
145 tm->tm_pktfield1_slabshr);
146 uint64_t pkt0_dscp = BITFIELD(pkt0_data,
147 tm->tm_pktfield2_slabpos,
148 tm->tm_pktfield2_slabmask,
149 tm->tm_pktfield2_slabshr);
150 uint32_t pkt0_tc = tm->tm_tc_table[pkt0_dscp & 0x3F] >> 2;
151 uint32_t pkt0_tc_q = tm->tm_tc_table[pkt0_dscp & 0x3F] & 0x3;
152 uint64_t pkt1_subport = BITFIELD(pkt1_data,
153 tm->tm_pktfield0_slabpos,
154 tm->tm_pktfield0_slabmask,
155 tm->tm_pktfield0_slabshr);
156 uint64_t pkt1_pipe = BITFIELD(pkt1_data,
157 tm->tm_pktfield1_slabpos,
158 tm->tm_pktfield1_slabmask,
159 tm->tm_pktfield1_slabshr);
160 uint64_t pkt1_dscp = BITFIELD(pkt1_data,
161 tm->tm_pktfield2_slabpos,
162 tm->tm_pktfield2_slabmask,
163 tm->tm_pktfield2_slabshr);
164 uint32_t pkt1_tc = tm->tm_tc_table[pkt1_dscp & 0x3F] >> 2;
165 uint32_t pkt1_tc_q = tm->tm_tc_table[pkt1_dscp & 0x3F] & 0x3;
167 uint64_t pkt2_subport = BITFIELD(pkt2_data,
168 tm->tm_pktfield0_slabpos,
169 tm->tm_pktfield0_slabmask,
170 tm->tm_pktfield0_slabshr);
171 uint64_t pkt2_pipe = BITFIELD(pkt2_data,
172 tm->tm_pktfield1_slabpos,
173 tm->tm_pktfield1_slabmask,
174 tm->tm_pktfield1_slabshr);
175 uint64_t pkt2_dscp = BITFIELD(pkt2_data,
176 tm->tm_pktfield2_slabpos,
177 tm->tm_pktfield2_slabmask,
178 tm->tm_pktfield2_slabshr);
179 uint32_t pkt2_tc = tm->tm_tc_table[pkt2_dscp & 0x3F] >> 2;
180 uint32_t pkt2_tc_q = tm->tm_tc_table[pkt2_dscp & 0x3F] & 0x3;
182 uint64_t pkt3_subport = BITFIELD(pkt3_data,
183 tm->tm_pktfield0_slabpos,
184 tm->tm_pktfield0_slabmask,
185 tm->tm_pktfield0_slabshr);
186 uint64_t pkt3_pipe = BITFIELD(pkt3_data,
187 tm->tm_pktfield1_slabpos,
188 tm->tm_pktfield1_slabmask,
189 tm->tm_pktfield1_slabshr);
190 uint64_t pkt3_dscp = BITFIELD(pkt3_data,
191 tm->tm_pktfield2_slabpos,
192 tm->tm_pktfield2_slabmask,
193 tm->tm_pktfield2_slabshr);
194 uint32_t pkt3_tc = tm->tm_tc_table[pkt3_dscp & 0x3F] >> 2;
195 uint32_t pkt3_tc_q = tm->tm_tc_table[pkt3_dscp & 0x3F] & 0x3;
197 uint64_t pkt0_sched = RTE_SCHED_PORT_HIERARCHY(pkt0_subport,
202 uint64_t pkt1_sched = RTE_SCHED_PORT_HIERARCHY(pkt1_subport,
207 uint64_t pkt2_sched = RTE_SCHED_PORT_HIERARCHY(pkt2_subport,
212 uint64_t pkt3_sched = RTE_SCHED_PORT_HIERARCHY(pkt3_subport,
218 pkt0->hash.sched.lo = pkt0_sched & 0xFFFFFFFF;
219 pkt0->hash.sched.hi = pkt0_sched >> 32;
220 pkt1->hash.sched.lo = pkt1_sched & 0xFFFFFFFF;
221 pkt1->hash.sched.hi = pkt1_sched >> 32;
222 pkt2->hash.sched.lo = pkt2_sched & 0xFFFFFFFF;
223 pkt2->hash.sched.hi = pkt2_sched >> 32;
224 pkt3->hash.sched.lo = pkt3_sched & 0xFFFFFFFF;
225 pkt3->hash.sched.hi = pkt3_sched >> 32;
228 for (; i < n_pkts; i++) {
229 struct rte_mbuf *pkt = pkts[i];
231 uint8_t *pkt_data = rte_pktmbuf_mtod(pkt, uint8_t *);
233 uint64_t pkt_subport = BITFIELD(pkt_data,
234 tm->tm_pktfield0_slabpos,
235 tm->tm_pktfield0_slabmask,
236 tm->tm_pktfield0_slabshr);
237 uint64_t pkt_pipe = BITFIELD(pkt_data,
238 tm->tm_pktfield1_slabpos,
239 tm->tm_pktfield1_slabmask,
240 tm->tm_pktfield1_slabshr);
241 uint64_t pkt_dscp = BITFIELD(pkt_data,
242 tm->tm_pktfield2_slabpos,
243 tm->tm_pktfield2_slabmask,
244 tm->tm_pktfield2_slabshr);
245 uint32_t pkt_tc = tm->tm_tc_table[pkt_dscp & 0x3F] >> 2;
246 uint32_t pkt_tc_q = tm->tm_tc_table[pkt_dscp & 0x3F] & 0x3;
248 uint64_t pkt_sched = RTE_SCHED_PORT_HIERARCHY(pkt_subport,
254 pkt->hash.sched.lo = pkt_sched & 0xFFFFFFFF;
255 pkt->hash.sched.hi = pkt_sched >> 32;
260 * Soft port packet forward
263 softport_packet_fwd(struct fwd_stream *fs)
265 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
266 struct rte_port *rte_tx_port = &ports[fs->tx_port];
271 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
274 uint64_t core_cycles;
277 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
278 start_tsc = rte_rdtsc();
281 /* Packets Receive */
282 nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue,
283 pkts_burst, nb_pkt_per_burst);
284 fs->rx_packets += nb_rx;
286 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
287 fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
290 if (rte_tx_port->softnic_enable) {
291 /* Set packet metadata if tm flag enabled */
292 if (rte_tx_port->softport.tm_flag)
293 pkt_metadata_set(rte_tx_port, pkts_burst, nb_rx);
296 rte_pmd_softnic_run(fs->tx_port);
298 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
301 /* Retry if necessary */
302 if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
304 while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
305 rte_delay_us(burst_tx_delay_time);
306 nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
307 &pkts_burst[nb_tx], nb_rx - nb_tx);
310 fs->tx_packets += nb_tx;
312 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
313 fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
316 if (unlikely(nb_tx < nb_rx)) {
317 fs->fwd_dropped += (nb_rx - nb_tx);
319 rte_pktmbuf_free(pkts_burst[nb_tx]);
320 } while (++nb_tx < nb_rx);
322 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
323 end_tsc = rte_rdtsc();
324 core_cycles = (end_tsc - start_tsc);
325 fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
330 set_tm_hiearchy_nodes_shaper_rate(portid_t port_id, struct tm_hierarchy *h)
332 struct rte_eth_link link_params;
333 uint64_t tm_port_rate;
335 memset(&link_params, 0, sizeof(link_params));
337 rte_eth_link_get(port_id, &link_params);
338 tm_port_rate = link_params.link_speed * BYTES_IN_MBPS;
340 if (tm_port_rate > UINT32_MAX)
341 tm_port_rate = UINT32_MAX;
343 /* Set tm hierarchy shapers rate */
344 h->root_node_shaper_rate = tm_port_rate;
345 h->subport_node_shaper_rate =
346 tm_port_rate / SUBPORT_NODES_PER_PORT;
347 h->pipe_node_shaper_rate
348 = h->subport_node_shaper_rate / PIPE_NODES_PER_SUBPORT;
349 h->tc_node_shaper_rate = h->pipe_node_shaper_rate;
350 h->tc_node_shared_shaper_rate = h->subport_node_shaper_rate;
354 softport_tm_root_node_add(portid_t port_id, struct tm_hierarchy *h,
355 struct rte_tm_error *error)
357 struct rte_tm_node_params rnp;
358 struct rte_tm_shaper_params rsp;
359 uint32_t priority, weight, level_id, shaper_profile_id;
361 memset(&rsp, 0, sizeof(struct rte_tm_shaper_params));
362 memset(&rnp, 0, sizeof(struct rte_tm_node_params));
364 /* Shaper profile Parameters */
365 rsp.peak.rate = h->root_node_shaper_rate;
366 rsp.peak.size = TOKEN_BUCKET_SIZE;
367 rsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
368 shaper_profile_id = 0;
370 if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
372 printf("%s ERROR(%d)-%s!(shaper_id %u)\n ",
373 __func__, error->type, error->message,
378 /* Root Node Parameters */
379 h->root_node_id = ROOT_NODE_ID;
382 level_id = TM_NODE_LEVEL_PORT;
383 rnp.shaper_profile_id = shaper_profile_id;
384 rnp.nonleaf.n_sp_priorities = 1;
385 rnp.stats_mask = STATS_MASK_DEFAULT;
387 /* Add Node to TM Hierarchy */
388 if (rte_tm_node_add(port_id, h->root_node_id, RTE_TM_NODE_ID_NULL,
389 priority, weight, level_id, &rnp, error)) {
390 printf("%s ERROR(%d)-%s!(node_id %u, parent_id %u, level %u)\n",
391 __func__, error->type, error->message,
392 h->root_node_id, RTE_TM_NODE_ID_NULL,
399 printf(" Root node added (Start id %u, Count %u, level %u)\n",
400 h->root_node_id, 1, level_id);
406 softport_tm_subport_node_add(portid_t port_id, struct tm_hierarchy *h,
407 struct rte_tm_error *error)
409 uint32_t subport_parent_node_id, subport_node_id = 0;
410 struct rte_tm_node_params snp;
411 struct rte_tm_shaper_params ssp;
412 uint32_t priority, weight, level_id, shaper_profile_id;
415 memset(&ssp, 0, sizeof(struct rte_tm_shaper_params));
416 memset(&snp, 0, sizeof(struct rte_tm_node_params));
418 shaper_profile_id = h->n_shapers;
420 /* Add Shaper Profile to TM Hierarchy */
421 for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
422 ssp.peak.rate = h->subport_node_shaper_rate;
423 ssp.peak.size = TOKEN_BUCKET_SIZE;
424 ssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
426 if (rte_tm_shaper_profile_add(port_id, shaper_profile_id,
428 printf("%s ERROR(%d)-%s!(shaper_id %u)\n ",
429 __func__, error->type, error->message,
434 /* Node Parameters */
435 h->subport_node_id[i] = SUBPORT_NODES_START_ID + i;
436 subport_parent_node_id = h->root_node_id;
439 level_id = TM_NODE_LEVEL_SUBPORT;
440 snp.shaper_profile_id = shaper_profile_id;
441 snp.nonleaf.n_sp_priorities = 1;
442 snp.stats_mask = STATS_MASK_DEFAULT;
444 /* Add Node to TM Hiearchy */
445 if (rte_tm_node_add(port_id,
446 h->subport_node_id[i],
447 subport_parent_node_id,
452 printf("%s ERROR(%d)-%s!(node %u,parent %u,level %u)\n",
456 h->subport_node_id[i],
457 subport_parent_node_id,
465 h->n_shapers = shaper_profile_id;
467 printf(" Subport nodes added (Start id %u, Count %u, level %u)\n",
468 h->subport_node_id[0], SUBPORT_NODES_PER_PORT, level_id);
474 softport_tm_pipe_node_add(portid_t port_id, struct tm_hierarchy *h,
475 struct rte_tm_error *error)
477 uint32_t pipe_parent_node_id;
478 struct rte_tm_node_params pnp;
479 struct rte_tm_shaper_params psp;
480 uint32_t priority, weight, level_id, shaper_profile_id;
483 memset(&psp, 0, sizeof(struct rte_tm_shaper_params));
484 memset(&pnp, 0, sizeof(struct rte_tm_node_params));
486 shaper_profile_id = h->n_shapers;
488 /* Shaper Profile Parameters */
489 psp.peak.rate = h->pipe_node_shaper_rate;
490 psp.peak.size = TOKEN_BUCKET_SIZE;
491 psp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
493 /* Pipe Node Parameters */
496 level_id = TM_NODE_LEVEL_PIPE;
497 pnp.nonleaf.n_sp_priorities = 4;
498 pnp.stats_mask = STATS_MASK_DEFAULT;
500 /* Add Shaper Profiles and Nodes to TM Hierarchy */
501 for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
502 for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
503 if (rte_tm_shaper_profile_add(port_id,
504 shaper_profile_id, &psp, error)) {
505 printf("%s ERROR(%d)-%s!(shaper_id %u)\n ",
506 __func__, error->type, error->message,
510 pnp.shaper_profile_id = shaper_profile_id;
511 pipe_parent_node_id = h->subport_node_id[i];
512 h->pipe_node_id[i][j] = PIPE_NODES_START_ID +
513 (i * PIPE_NODES_PER_SUBPORT) + j;
515 if (rte_tm_node_add(port_id,
516 h->pipe_node_id[i][j],
518 priority, weight, level_id,
521 printf("%s ERROR(%d)-%s!(node %u,parent %u )\n",
525 h->pipe_node_id[i][j],
526 pipe_parent_node_id);
534 h->n_shapers = shaper_profile_id;
536 printf(" Pipe nodes added (Start id %u, Count %u, level %u)\n",
537 h->pipe_node_id[0][0], NUM_PIPE_NODES, level_id);
543 softport_tm_tc_node_add(portid_t port_id, struct tm_hierarchy *h,
544 struct rte_tm_error *error)
546 uint32_t tc_parent_node_id;
547 struct rte_tm_node_params tnp;
548 struct rte_tm_shaper_params tsp, tssp;
549 uint32_t shared_shaper_profile_id[TC_NODES_PER_PIPE];
550 uint32_t priority, weight, level_id, shaper_profile_id;
551 uint32_t pos, n_tc_nodes, i, j, k;
553 memset(&tsp, 0, sizeof(struct rte_tm_shaper_params));
554 memset(&tssp, 0, sizeof(struct rte_tm_shaper_params));
555 memset(&tnp, 0, sizeof(struct rte_tm_node_params));
557 shaper_profile_id = h->n_shapers;
559 /* Private Shaper Profile (TC) Parameters */
560 tsp.peak.rate = h->tc_node_shaper_rate;
561 tsp.peak.size = TOKEN_BUCKET_SIZE;
562 tsp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
564 /* Shared Shaper Profile (TC) Parameters */
565 tssp.peak.rate = h->tc_node_shared_shaper_rate;
566 tssp.peak.size = TOKEN_BUCKET_SIZE;
567 tssp.pkt_length_adjust = RTE_TM_ETH_FRAMING_OVERHEAD_FCS;
569 /* TC Node Parameters */
571 level_id = TM_NODE_LEVEL_TC;
572 tnp.n_shared_shapers = 1;
573 tnp.nonleaf.n_sp_priorities = 1;
574 tnp.stats_mask = STATS_MASK_DEFAULT;
576 /* Add Shared Shaper Profiles to TM Hierarchy */
577 for (i = 0; i < TC_NODES_PER_PIPE; i++) {
578 shared_shaper_profile_id[i] = shaper_profile_id;
580 if (rte_tm_shaper_profile_add(port_id,
581 shared_shaper_profile_id[i], &tssp, error)) {
582 printf("%s ERROR(%d)-%s!(Shared shaper profileid %u)\n",
583 __func__, error->type, error->message,
584 shared_shaper_profile_id[i]);
588 if (rte_tm_shared_shaper_add_update(port_id, i,
589 shared_shaper_profile_id[i], error)) {
590 printf("%s ERROR(%d)-%s!(Shared shaper id %u)\n",
591 __func__, error->type, error->message, i);
598 /* Add Shaper Profiles and Nodes to TM Hierarchy */
600 for (i = 0; i < SUBPORT_NODES_PER_PORT; i++) {
601 for (j = 0; j < PIPE_NODES_PER_SUBPORT; j++) {
602 for (k = 0; k < TC_NODES_PER_PIPE ; k++) {
604 tc_parent_node_id = h->pipe_node_id[i][j];
605 tnp.shared_shaper_id =
606 (uint32_t *)calloc(1, sizeof(uint32_t));
607 tnp.shared_shaper_id[0] = k;
608 pos = j + (i * PIPE_NODES_PER_SUBPORT);
609 h->tc_node_id[pos][k] =
610 TC_NODES_START_ID + n_tc_nodes;
612 if (rte_tm_shaper_profile_add(port_id,
613 shaper_profile_id, &tsp, error)) {
614 printf("%s ERROR(%d)-%s!(shaper %u)\n",
615 __func__, error->type,
621 tnp.shaper_profile_id = shaper_profile_id;
622 if (rte_tm_node_add(port_id,
623 h->tc_node_id[pos][k],
628 printf("%s ERROR(%d)-%s!(node id %u)\n",
632 h->tc_node_id[pos][k]);
642 h->n_shapers = shaper_profile_id;
644 printf(" TC nodes added (Start id %u, Count %u, level %u)\n",
645 h->tc_node_id[0][0], n_tc_nodes, level_id);
651 softport_tm_queue_node_add(portid_t port_id, struct tm_hierarchy *h,
652 struct rte_tm_error *error)
654 uint32_t queue_parent_node_id;
655 struct rte_tm_node_params qnp;
656 uint32_t priority, weight, level_id, pos;
657 uint32_t n_queue_nodes, i, j, k;
659 memset(&qnp, 0, sizeof(struct rte_tm_node_params));
661 /* Queue Node Parameters */
664 level_id = TM_NODE_LEVEL_QUEUE;
665 qnp.shaper_profile_id = RTE_TM_SHAPER_PROFILE_ID_NONE;
666 qnp.leaf.cman = RTE_TM_CMAN_TAIL_DROP;
667 qnp.stats_mask = STATS_MASK_QUEUE;
669 /* Add Queue Nodes to TM Hierarchy */
671 for (i = 0; i < NUM_PIPE_NODES; i++) {
672 for (j = 0; j < TC_NODES_PER_PIPE; j++) {
673 queue_parent_node_id = h->tc_node_id[i][j];
674 for (k = 0; k < QUEUE_NODES_PER_TC; k++) {
675 pos = j + (i * TC_NODES_PER_PIPE);
676 h->queue_node_id[pos][k] = n_queue_nodes;
677 if (rte_tm_node_add(port_id,
678 h->queue_node_id[pos][k],
679 queue_parent_node_id,
684 printf("%s ERROR(%d)-%s!(node %u)\n",
688 h->queue_node_id[pos][k]);
696 printf(" Queue nodes added (Start id %u, Count %u, level %u)\n",
697 h->queue_node_id[0][0], n_queue_nodes, level_id);
703 * TM Packet Field Setup
706 softport_tm_pktfield_setup(portid_t port_id)
708 struct rte_port *p = &ports[port_id];
709 uint64_t pktfield0_mask = 0;
710 uint64_t pktfield1_mask = 0x0000000FFF000000LLU;
711 uint64_t pktfield2_mask = 0x00000000000000FCLLU;
713 p->softport.tm = (struct softnic_port_tm) {
714 .n_subports_per_port = SUBPORT_NODES_PER_PORT,
715 .n_pipes_per_subport = PIPE_NODES_PER_SUBPORT,
717 /* Packet field to identify subport
719 * Default configuration assumes only one subport, thus
720 * the subport ID is hardcoded to 0
722 .tm_pktfield0_slabpos = 0,
723 .tm_pktfield0_slabmask = pktfield0_mask,
724 .tm_pktfield0_slabshr =
725 __builtin_ctzll(pktfield0_mask),
727 /* Packet field to identify pipe.
729 * Default value assumes Ethernet/IPv4/UDP packets,
730 * UDP payload bits 12 .. 23
732 .tm_pktfield1_slabpos = 40,
733 .tm_pktfield1_slabmask = pktfield1_mask,
734 .tm_pktfield1_slabshr =
735 __builtin_ctzll(pktfield1_mask),
737 /* Packet field used as index into TC translation table
738 * to identify the traffic class and queue.
740 * Default value assumes Ethernet/IPv4 packets, IPv4
743 .tm_pktfield2_slabpos = 8,
744 .tm_pktfield2_slabmask = pktfield2_mask,
745 .tm_pktfield2_slabshr =
746 __builtin_ctzll(pktfield2_mask),
749 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
750 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
751 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
752 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
753 }, /**< TC translation table */
758 softport_tm_hierarchy_specify(portid_t port_id, struct rte_tm_error *error)
761 struct tm_hierarchy h;
764 memset(&h, 0, sizeof(struct tm_hierarchy));
766 /* TM hierarchy shapers rate */
767 set_tm_hiearchy_nodes_shaper_rate(port_id, &h);
769 /* Add root node (level 0) */
770 status = softport_tm_root_node_add(port_id, &h, error);
774 /* Add subport node (level 1) */
775 status = softport_tm_subport_node_add(port_id, &h, error);
779 /* Add pipe nodes (level 2) */
780 status = softport_tm_pipe_node_add(port_id, &h, error);
784 /* Add traffic class nodes (level 3) */
785 status = softport_tm_tc_node_add(port_id, &h, error);
789 /* Add queue nodes (level 4) */
790 status = softport_tm_queue_node_add(port_id, &h, error);
794 /* TM packet fields setup */
795 softport_tm_pktfield_setup(port_id);
804 softport_tm_begin(portid_t pi)
806 struct rte_port *port = &ports[pi];
808 /* Soft port TM flag */
809 if (port->softport.tm_flag == 1) {
810 printf("\n\n TM feature available on port %u\n", pi);
812 /* Soft port TM hierarchy configuration */
813 if ((port->softport.tm.hierarchy_config == 0) &&
814 (port->softport.tm.default_hierarchy_enable == 1)) {
815 struct rte_tm_error error;
819 rte_eth_dev_stop(pi);
821 /* TM hierarchy specification */
822 status = softport_tm_hierarchy_specify(pi, &error);
824 printf(" TM Hierarchy built error(%d) - %s\n",
825 error.type, error.message);
828 printf("\n TM Hierarchy Specified!\n\v");
830 /* TM hierarchy commit */
831 status = rte_tm_hierarchy_commit(pi, 0, &error);
833 printf(" Hierarchy commit error(%d) - %s\n",
834 error.type, error.message);
837 printf(" Hierarchy Committed (port %u)!", pi);
838 port->softport.tm.hierarchy_config = 1;
841 status = rte_eth_dev_start(pi);
843 printf("\n Port %u start error!\n", pi);
846 printf("\n Port %u started!\n", pi);
850 printf("\n TM feature not available on port %u", pi);
853 struct fwd_engine softnic_tm_engine = {
854 .fwd_mode_name = "tm",
855 .port_fwd_begin = softport_tm_begin,
856 .port_fwd_end = NULL,
857 .packet_fwd = softport_packet_fwd,
860 struct fwd_engine softnic_tm_bypass_engine = {
861 .fwd_mode_name = "tm-bypass",
862 .port_fwd_begin = NULL,
863 .port_fwd_end = NULL,
864 .packet_fwd = softport_packet_fwd,