#include <rte_cycles.h>
#include <rte_flow.h>
#include <rte_errno.h>
-#ifdef RTE_LIBRTE_IXGBE_PMD
+#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
-#ifdef RTE_LIBRTE_I40E_PMD
+#ifdef RTE_NET_I40E
#include <rte_pmd_i40e.h>
#endif
-#ifdef RTE_LIBRTE_BNXT_PMD
+#ifdef RTE_NET_BNXT
#include <rte_pmd_bnxt.h>
#endif
#include <rte_gro.h>
printf("lcore %u not enabled\n", lcore_cpuid);
return -1;
}
- if (lcore_cpuid == rte_get_master_lcore()) {
+ if (lcore_cpuid == rte_get_main_lcore()) {
printf("lcore %u cannot be masked on for running "
- "packet forwarding, which is the master lcore "
+ "packet forwarding, which is the main lcore "
"and reserved for command line parsing only\n",
lcore_cpuid);
return -1;
}
}
+/*
+ * Display the configured Rx packet split offsets: the count first, then
+ * the offset values as a comma-separated list (last value newline-ended).
+ */
+void
+show_rx_pkt_offsets(void)
+{
+	uint32_t i, n;
+
+	n = rx_pkt_nb_offs;
+	printf("Number of offsets: %u\n", n);
+	if (n) {
+		printf("Segment offsets: ");
+		for (i = 0; i != n - 1; i++)
+			printf("%hu,", rx_pkt_seg_offsets[i]);
+		/* Print the last OFFSET — the original mistakenly read
+		 * rx_pkt_seg_lengths[] here (copy-paste from the
+		 * segments variant of this function). */
+		printf("%hu\n", rx_pkt_seg_offsets[i]);
+	}
+}
+
+/*
+ * Configure the Rx packet split offsets from user input.
+ * Every offset is validated to fit in uint16_t BEFORE any global state
+ * is written, so a rejected call leaves the previous configuration
+ * intact.
+ */
+void
+set_rx_pkt_offsets(unsigned int *seg_offsets, unsigned int nb_offs)
+{
+	unsigned int i;
+
+	/* NOTE(review): this message says "segments" but nb_offs is the
+	 * offsets count — text looks copy-pasted from
+	 * set_rx_pkt_segments(); confirm before changing the string. */
+	if (nb_offs >= MAX_SEGS_BUFFER_SPLIT) {
+		printf("nb segments per RX packets=%u >= "
+		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_offs);
+		return;
+	}
+
+	/*
+	 * No extra check here, the segment offset will be checked by PMD
+	 * in the extended queue setup.
+	 */
+	for (i = 0; i < nb_offs; i++) {
+		if (seg_offsets[i] >= UINT16_MAX) {
+			printf("offset[%u]=%u > UINT16_MAX - give up\n",
+			       i, seg_offsets[i]);
+			return;
+		}
+	}
+
+	/* All values validated: commit to the global configuration. */
+	for (i = 0; i < nb_offs; i++)
+		rx_pkt_seg_offsets[i] = (uint16_t) seg_offsets[i];
+
+	rx_pkt_nb_offs = (uint8_t) nb_offs;
+}
+
+/*
+ * Display the configured Rx packet segment sizes: the count first, then
+ * the sizes as a comma-separated list (last value newline-ended).
+ */
+void
+show_rx_pkt_segments(void)
+{
+	uint32_t idx;
+	uint32_t count = rx_pkt_nb_segs;
+
+	printf("Number of segments: %u\n", count);
+	if (count == 0)
+		return;
+	printf("Segment sizes: ");
+	for (idx = 0; idx < count; idx++) {
+		/* Comma between values, newline after the last one. */
+		const char *sep = (idx + 1 == count) ? "\n" : ",";
+
+		printf("%hu%s", rx_pkt_seg_lengths[idx], sep);
+	}
+}
+
+/*
+ * Configure the Rx packet segment lengths used for buffer split.
+ * Validation happens in a first pass over all inputs; globals are only
+ * written once every value is known to fit in uint16_t, so a rejected
+ * call leaves the previous configuration untouched.
+ */
+void
+set_rx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
+{
+	unsigned int idx;
+
+	/* Reject over-long segment lists up front. */
+	if (nb_segs >= MAX_SEGS_BUFFER_SPLIT) {
+		printf("nb segments per RX packets=%u >= "
+		       "MAX_SEGS_BUFFER_SPLIT - ignored\n", nb_segs);
+		return;
+	}
+
+	/*
+	 * No extra check here, the segment length will be checked by PMD
+	 * in the extended queue setup.
+	 */
+	for (idx = 0; idx < nb_segs; idx++) {
+		if (seg_lengths[idx] >= UINT16_MAX) {
+			printf("length[%u]=%u > UINT16_MAX - give up\n",
+			       idx, seg_lengths[idx]);
+			return;
+		}
+	}
+
+	/* Second pass: commit the validated lengths. */
+	for (idx = 0; idx < nb_segs; idx++)
+		rx_pkt_seg_lengths[idx] = (uint16_t)seg_lengths[idx];
+
+	rx_pkt_nb_segs = (uint8_t)nb_segs;
+}
+
void
show_tx_pkt_segments(void)
{
}
void
-set_tx_pkt_segments(unsigned *seg_lengths, unsigned nb_segs)
+set_tx_pkt_segments(unsigned int *seg_lengths, unsigned int nb_segs)
{
uint16_t tx_pkt_len;
- unsigned i;
+ unsigned int i;
if (nb_segs_is_invalid(nb_segs))
return;
void
set_tx_pkt_times(unsigned int *tx_times)
{
-	uint16_t port_id;
-	int offload_found = 0;
-	int offset;
-	int flag;
-
-	static const struct rte_mbuf_dynfield desc_offs = {
-		.name = RTE_MBUF_DYNFIELD_TIMESTAMP_NAME,
-		.size = sizeof(uint64_t),
-		.align = __alignof__(uint64_t),
-	};
-	static const struct rte_mbuf_dynflag desc_flag = {
-		.name = RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME,
-	};
-
-	RTE_ETH_FOREACH_DEV(port_id) {
-		struct rte_eth_dev_info dev_info = { 0 };
-		int ret;
-
-		ret = rte_eth_dev_info_get(port_id, &dev_info);
-		if (ret == 0 && dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_SEND_ON_TIMESTAMP) {
-			offload_found = 1;
-			break;
-		}
-	}
-	if (!offload_found) {
-		printf("No device supporting Tx timestamp scheduling found, "
-		       "dynamic flag and field not registered\n");
-		return;
-	}
-	offset = rte_mbuf_dynfield_register(&desc_offs);
-	if (offset < 0 && rte_errno != EEXIST)
-		printf("Dynamic timestamp field registration error: %d",
-			rte_errno);
-	flag = rte_mbuf_dynflag_register(&desc_flag);
-	if (flag < 0 && rte_errno != EEXIST)
-		printf("Dynamic timestamp flag registration error: %d",
-			rte_errno);
+	/* NOTE(review): the mbuf dynamic timestamp field/flag registration
+	 * was removed from this setter by this patch — presumably it now
+	 * happens elsewhere (e.g. during port/Tx setup); confirm Tx
+	 * timestamp scheduling still registers the dynfield/dynflag
+	 * before relying on it. This function now only stores the
+	 * inter-/intra-burst timings. */
	tx_pkt_times_inter = tx_times[0];
	tx_pkt_times_intra = tx_times[1];
}
return NULL;
}
+#if defined(RTE_NET_I40E) || defined(RTE_NET_IXGBE)
+
static inline void
print_fdir_flex_mask(struct rte_eth_fdir_flex_conf *flex_conf, uint32_t num)
{
get_fdir_info(portid_t port_id, struct rte_eth_fdir_info *fdir_info,
struct rte_eth_fdir_stats *fdir_stat)
{
- int ret;
+ int ret = -ENOTSUP;
- ret = rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
- if (!ret) {
- rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
- RTE_ETH_FILTER_INFO, fdir_info);
- rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
- RTE_ETH_FILTER_STATS, fdir_stat);
- return 0;
- }
-
-#ifdef RTE_LIBRTE_I40E_PMD
+#ifdef RTE_NET_I40E
if (ret == -ENOTSUP) {
ret = rte_pmd_i40e_get_fdir_info(port_id, fdir_info);
if (!ret)
ret = rte_pmd_i40e_get_fdir_stats(port_id, fdir_stat);
}
#endif
-#ifdef RTE_LIBRTE_IXGBE_PMD
+#ifdef RTE_NET_IXGBE
if (ret == -ENOTSUP) {
ret = rte_pmd_ixgbe_get_fdir_info(port_id, fdir_info);
if (!ret)
fdir_stats_border, fdir_stats_border);
}
+#endif /* RTE_NET_I40E || RTE_NET_IXGBE */
+
void
fdir_set_flex_mask(portid_t port_id, struct rte_eth_fdir_flex_mask *cfg)
{
void
set_vf_traffic(portid_t port_id, uint8_t is_rx, uint16_t vf, uint8_t on)
{
-#ifdef RTE_LIBRTE_IXGBE_PMD
+#ifdef RTE_NET_IXGBE
int diag;
if (is_rx)
RTE_SET_USED(rate);
RTE_SET_USED(q_msk);
-#ifdef RTE_LIBRTE_IXGBE_PMD
+#ifdef RTE_NET_IXGBE
if (diag == -ENOTSUP)
diag = rte_pmd_ixgbe_set_vf_rate_limit(port_id, vf, rate,
q_msk);
#endif
-#ifdef RTE_LIBRTE_BNXT_PMD
+#ifdef RTE_NET_BNXT
if (diag == -ENOTSUP)
diag = rte_pmd_bnxt_set_vf_rate_limit(port_id, vf, rate, q_msk);
#endif
void
port_queue_region_info_display(portid_t port_id, void *buf)
{
-#ifdef RTE_LIBRTE_I40E_PMD
+#ifdef RTE_NET_I40E
uint16_t i, j;
struct rte_pmd_i40e_queue_regions *info =
(struct rte_pmd_i40e_queue_regions *)buf;