" help ports : Configuring ports.\n"
" help registers : Reading and setting port registers.\n"
" help filters : Filters configuration help.\n"
- " help traffic_management : Traffic Management commmands.\n"
+ " help traffic_management : Traffic Management commands.\n"
" help devices : Device related cmds.\n"
" help all : All of the above sections.\n\n"
);
if (test_done == 0) {
printf("Before setting GSO segsz, please first"
- " stop fowarding\n");
+ " stop forwarding\n");
return;
}
tref += refcnt_lcore[slave];
if (tref != refcnt_lcore[master])
- rte_panic("refernced mbufs: %u, freed mbufs: %u\n",
+ rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
tref, refcnt_lcore[master]);
rte_mempool_dump(stdout, refcnt_pool);
NPC_LT_LC_FCOE,
};
-/* Don't modify Ltypes upto SCTP, otherwise it will
- * effect flow tag calculation and thus RSS.
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
*/
enum npc_kpu_ld_ltype {
NPC_LT_LG_TU_ETHER_IN_NSH,
};
-/* Don't modify Ltypes upto SCTP, otherwise it will
- * effect flow tag calculation and thus RSS.
+/* Don't modify Ltypes up to SCTP, otherwise it will
+ * affect flow tag calculation and thus RSS.
*/
enum npc_kpu_lh_ltype {
qp->name = name;
- /* Create completion queue upto max_inflight_ops */
+ /* Create completion queue up to max_inflight_ops */
qp->processed_pkts = zip_pmd_qp_create_processed_pkts_ring(qp,
max_inflight_ops, socket_id);
if (qp->processed_pkts == NULL)
dev_info->max_event_priority_levels =
DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
- /* we only support dpio upto number of cores*/
+ /* we only support dpio up to the number of cores */
if (dev_info->max_event_ports > rte_lcore_count())
dev_info->max_event_ports = rte_lcore_count();
dev_info->max_event_port_dequeue_depth =
}
if (hash_types) {
PMD_DRV_LOG(ERR,
- "Unknwon RSS config from firmware (%08x), RSS disabled",
+ "Unknown RSS config from firmware (%08x), RSS disabled",
vnic->hash_type);
return -ENOTSUP;
}
if (val->index > 0x7)
return rte_flow_error_set(e, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "port index upto 0x7 is supported");
+ "port index up to 0x7 is supported");
CXGBE_FILL_FS(val->index, mask->index, iport);
key_iova = (size_t)rte_malloc(NULL, 256, 64);
if (!key_iova) {
DPAA2_PMD_ERR(
- "Memory allocation failure for rule configration\n");
+ "Memory allocation failure for rule configuration\n");
goto mem_failure;
}
mask_iova = (size_t)rte_malloc(NULL, 256, 64);
if (!mask_iova) {
DPAA2_PMD_ERR(
- "Memory allocation failure for rule configration\n");
+ "Memory allocation failure for rule configuration\n");
goto mem_failure;
}
(2 * DIST_PARAM_IOVA_SIZE), RTE_CACHE_LINE_SIZE);
if (!flow) {
DPAA2_PMD_ERR(
- "Memory allocation failure for rule configration\n");
+ "Memory allocation failure for rule configuration\n");
goto creation_error;
}
key_iova = (void *)((size_t)flow + sizeof(struct rte_flow));
err = alloc_mbox_info(func_to_func->mbox_resp);
if (err) {
- PMD_DRV_LOG(ERR, "Allocating memory for mailbox responsing failed");
+ PMD_DRV_LOG(ERR, "Allocating memory for mailbox responding failed");
goto alloc_mbox_for_resp_err;
}
/*
* Calculate the number of available resources - elts and WQEs.
* There are two possible different scenarios:
- * - no data inlining into WQEs, one WQEBB may contains upto
- *   four packets, in this case elts become scarce resource
+ * - no data inlining into WQEs, one WQEBB may contain up to
+ *   four packets; in this case elts become a scarce resource
* - data inlining into WQEs, one packet may require multiple
* WQEBBs, the WQEs become the limiting factor.
#include "pfe_logs.h"
#include "pfe_mod.h"
-#define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
+#define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */
#define PFE_VDEV_GEM_ID_ARG "intf"
struct pfe_vdev_init_params {
qede_reset_queue_stats(qdev, true);
/* Newer SR-IOV PF driver expects RX/TX queues to be started before
- * enabling RSS. Hence RSS configuration is deferred upto this point.
+ * enabling RSS. Hence RSS configuration is deferred until this point.
* Also, we would like to retain similar behavior in PF case, so we
* don't do PF/VF specific check here.
*/
#if defined(RTE_LIBRTE_KNI) && LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
} else if (rte_eal_check_module("rte_kni") == 1) {
iova_mode = RTE_IOVA_PA;
- RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI perfomance.\n");
+ RTE_LOG(DEBUG, EAL, "KNI is loaded, selecting IOVA as PA mode for better KNI performance.\n");
#endif
} else if (is_iommu_enabled()) {
/* we have an IOMMU, pick IOVA as VA mode */
* sqn and replay window
* In case of SA handled by multiple threads *sqn* cacheline
* could be shared by multiple cores.
- * To minimise perfomance impact, we try to locate in a separate
- * place from other frequently accesed data.
+ * To minimise performance impact, we try to locate it in a separate
+ * place from other frequently accessed data.
*/
union {