#define SUBPORT 0
#define PIPE 1
#define TC 2
-#define QUEUE 3
-
-static struct rte_sched_subport_params subport_param[] = {
- {
- .tb_rate = 1250000000,
- .tb_size = 1000000,
-
- .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
- .tc_period = 10,
- },
-};
+#define QUEUE 0
static struct rte_sched_pipe_params pipe_profile[] = {
{ /* Profile #0 */
.tb_rate = 305175,
.tb_size = 1000000,
- .tc_rate = {305175, 305175, 305175, 305175},
+ .tc_rate = {305175, 305175, 305175, 305175, 305175, 305175,
+ 305175, 305175, 305175, 305175, 305175, 305175, 305175},
.tc_period = 40,
+ .tc_ov_weight = 1,
- .wrr_weights = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1},
+ .wrr_weights = {1, 1, 1, 1},
+ },
+};
+
+static struct rte_sched_subport_params subport_param[] = {
+ {
+ .tb_rate = 1250000000,
+ .tb_size = 1000000,
+
+ .tc_rate = {1250000000, 1250000000, 1250000000, 1250000000,
+ 1250000000, 1250000000, 1250000000, 1250000000, 1250000000,
+ 1250000000, 1250000000, 1250000000, 1250000000},
+ .tc_period = 10,
+ .n_pipes_per_subport_enabled = 1024,
+ .qsize = {32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32},
+ .pipe_profiles = pipe_profile,
+ .n_pipe_profiles = 1,
+ .n_max_pipe_profiles = 1,
},
};
.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
.n_subports_per_port = 1,
.n_pipes_per_subport = 1024,
- .qsize = {32, 32, 32, 32},
- .pipe_profiles = pipe_profile,
- .n_pipe_profiles = 1,
};
#define NB_MBUF 32
static void
prepare_pkt(struct rte_sched_port *port, struct rte_mbuf *mbuf)
{
- struct ether_hdr *eth_hdr;
- struct vlan_hdr *vlan1, *vlan2;
- struct ipv4_hdr *ip_hdr;
+ struct rte_ether_hdr *eth_hdr;
+ struct rte_vlan_hdr *vlan1, *vlan2;
+ struct rte_ipv4_hdr *ip_hdr;
/* Simulate a classifier */
- eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
- vlan1 = (struct vlan_hdr *)(&eth_hdr->ether_type );
- vlan2 = (struct vlan_hdr *)((uintptr_t)&eth_hdr->ether_type + sizeof(struct vlan_hdr));
- eth_hdr = (struct ether_hdr *)((uintptr_t)&eth_hdr->ether_type + 2 *sizeof(struct vlan_hdr));
- ip_hdr = (struct ipv4_hdr *)((uintptr_t)eth_hdr + sizeof(eth_hdr->ether_type));
+ eth_hdr = rte_pktmbuf_mtod(mbuf, struct rte_ether_hdr *);
+ vlan1 = (struct rte_vlan_hdr *)(&eth_hdr->ether_type);
+ vlan2 = (struct rte_vlan_hdr *)(
+ (uintptr_t)&eth_hdr->ether_type + sizeof(struct rte_vlan_hdr));
+ eth_hdr = (struct rte_ether_hdr *)(
+ (uintptr_t)&eth_hdr->ether_type +
+ 2 * sizeof(struct rte_vlan_hdr));
+ ip_hdr = (struct rte_ipv4_hdr *)(
+ (uintptr_t)eth_hdr + sizeof(eth_hdr->ether_type));
vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
- eth_hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_IPv4);
- ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);
+ eth_hdr->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
+ ip_hdr->dst_addr = RTE_IPV4(0,0,TC,QUEUE);
rte_sched_port_pkt_write(port, mbuf, SUBPORT, PIPE, TC, QUEUE,
- e_RTE_METER_YELLOW);
+ RTE_COLOR_YELLOW);
/* 64 byte packet */
mbuf->pkt_len = 60;
err = rte_sched_subport_config(port, SUBPORT, subport_param);
TEST_ASSERT_SUCCESS(err, "Error config sched, err=%d\n", err);
- for (pipe = 0; pipe < port_param.n_pipes_per_subport; pipe ++) {
+ for (pipe = 0; pipe < subport_param[0].n_pipes_per_subport_enabled; pipe++) {
err = rte_sched_pipe_config(port, SUBPORT, pipe, 0);
TEST_ASSERT_SUCCESS(err, "Error config sched pipe %u, err=%d\n", pipe, err);
}
TEST_ASSERT_EQUAL(err, 10, "Wrong dequeue, err=%d\n", err);
for (i = 0; i < 10; i++) {
- enum rte_meter_color color;
+ enum rte_color color;
uint32_t subport, traffic_class, queue;
color = rte_sched_port_pkt_read_color(out_mbufs[i]);
- TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");
+ TEST_ASSERT_EQUAL(color, RTE_COLOR_YELLOW, "Wrong color\n");
rte_sched_port_pkt_read_tree_path(port, out_mbufs[i],
&subport, &pipe, &traffic_class, &queue);