From 4a6672c2d301c105189ae74de73260af204c5ee8 Mon Sep 17 00:00:00 2001
From: Stephen Hemminger
Date: Thu, 11 Nov 2021 16:02:09 -0800
Subject: [PATCH] fix spelling in comments and doxygen

Fix spelling errors in comments including doxygen found using codespell.

Signed-off-by: Stephen Hemminger
Acked-by: Konstantin Ananyev
Acked-by: Vladimir Medvedkin
Reviewed-by: Honnappa Nagarahalli
Acked-by: Ray Kinsella
Acked-by: Cristian Dumitrescu
Acked-by: Chenbo Xia
---
 app/test-pmd/cmdline.c | 2 +-
 app/test-pmd/config.c | 4 ++--
 app/test-pmd/icmpecho.c | 2 +-
 app/test/test_atomic.c | 2 +-
 app/test/test_barrier.c | 2 +-
 app/test/test_bpf.c | 2 +-
 app/test/test_compressdev.c | 2 +-
 app/test/test_func_reentrancy.c | 6 +++---
 app/test/test_ipsec.c | 2 +-
 app/test/test_link_bonding.c | 2 +-
 app/test/test_lpm.c | 6 +++---
 app/test/test_lpm6.c | 14 +++++++-------
 app/test/test_malloc.c | 4 ++--
 app/test/test_mbuf.c | 2 +-
 app/test/test_mempool.c | 2 +-
 app/test/test_mempool_perf.c | 2 +-
 app/test/test_meter.c | 6 +++---
 app/test/test_pmd_perf.c | 2 +-
 app/test/test_timer.c | 2 +-
 lib/acl/acl.h | 4 ++--
 lib/acl/acl_bld.c | 2 +-
 lib/acl/acl_run_avx2.h | 2 +-
 lib/bbdev/rte_bbdev.c | 2 +-
 lib/bpf/bpf_jit_x86.c | 2 +-
 lib/bpf/bpf_load_elf.c | 2 +-
 lib/bpf/bpf_pkt.c | 2 +-
 lib/bpf/bpf_validate.c | 8 ++++----
 lib/eal/include/rte_function_versioning.h | 2 +-
 lib/eal/windows/include/fnmatch.h | 2 +-
 lib/eventdev/rte_event_timer_adapter.c | 2 +-
 lib/hash/rte_thash.c | 2 +-
 lib/ip_frag/rte_ipv4_reassembly.c | 2 +-
 lib/ipsec/esp_inb.c | 4 ++--
 lib/ipsec/esp_outb.c | 2 +-
 lib/ipsec/ipsec_sad.c | 2 +-
 lib/ipsec/sa.c | 2 +-
 lib/ipsec/sa.h | 2 +-
 lib/net/rte_gtp.h | 6 +++---
 lib/node/ethdev_rx_priv.h | 2 +-
 lib/node/ethdev_tx_priv.h | 2 +-
 lib/node/ip4_rewrite_priv.h | 2 +-
 lib/pipeline/rte_swx_pipeline.h | 2 +-
 lib/power/power_acpi_cpufreq.c | 2 +-
 lib/rcu/rte_rcu_qsbr.h | 2 +-
 lib/rib/rte_rib6.c | 2 +-
 lib/sched/rte_sched.c | 2 +-
 lib/vhost/rte_vhost.h | 4 ++--
 47 files changed, 69 insertions(+), 69 deletions(-)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 4f51b259fe..fb5433fd5b 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -3653,7 +3653,7 @@ parse_item_list(const char *str, const char *item_name, unsigned int max_items,
 		return nb_item;

 	/*
-	 * Then, check that all values in the list are differents.
+	 * Then, check that all values in the list are different.
 	 * No optimization here...
 	 */
 	for (i = 0; i < nb_item; i++) {
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 26cadf39f7..26318b4f14 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -2965,7 +2965,7 @@ port_rss_reta_info(portid_t port_id,
 }

 /*
- * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionally, the RSS hash
  * key of the port.
  */
 void
@@ -5250,7 +5250,7 @@ mcast_addr_pool_remove(struct rte_port *port, uint32_t addr_idx)
 {
 	port->mc_addr_nb--;
 	if (addr_idx == port->mc_addr_nb) {
-		/* No need to recompact the set of multicast addressses. */
+		/* No need to recompact the set of multicast addresses. */
 		if (port->mc_addr_nb == 0) {
 			/* free the pool of multicast addresses. */
 			free(port->mc_addr_pool);
diff --git a/app/test-pmd/icmpecho.c b/app/test-pmd/icmpecho.c
index 8f1d68a83a..d6620f5f6a 100644
--- a/app/test-pmd/icmpecho.c
+++ b/app/test-pmd/icmpecho.c
@@ -54,7 +54,7 @@ arp_op_name(uint16_t arp_op)
 	default:
 		break;
 	}
-	return "Unkwown ARP op";
+	return "Unknown ARP op";
 }

 static const char *
diff --git a/app/test/test_atomic.c b/app/test/test_atomic.c
index ce0c259bd7..e4b997827e 100644
--- a/app/test/test_atomic.c
+++ b/app/test/test_atomic.c
@@ -88,7 +88,7 @@
  *
  * - Invoke ``test_atomic_exchange`` on each lcore. Before doing
  *   anything else, the cores wait for a synchronization event.
- *   Each core then does the follwoing for N iterations:
+ *   Each core then does the following for N iterations:
  *
  *   Generate a new token with a data integrity check
  *   Exchange the new token for previously generated token
diff --git a/app/test/test_barrier.c b/app/test/test_barrier.c
index c27f8a0742..8fa93c0032 100644
--- a/app/test/test_barrier.c
+++ b/app/test/test_barrier.c
@@ -66,7 +66,7 @@ struct plock_test {

 struct lcore_plock_test {
 	struct plock_test *pt[2]; /* shared, lock-protected data */
 	uint64_t sum[2];          /* local copy of the shared data */
-	uint64_t iter;            /* number of iterations to perfom */
+	uint64_t iter;            /* number of iterations to perform */
 	uint32_t lc;              /* given lcore id */
 };
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index 8f2414eb2b..8b28722515 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -2407,7 +2407,7 @@ static const struct ebpf_insn test_call5_prog[] = {
 	},
 };

-/* String comparision impelementation, return 0 if equal else difference */
+/* String comparison implementation, return 0 if equal else difference */
 static uint32_t
 dummy_func5(const char *s1, const char *s2)
 {
diff --git a/app/test/test_compressdev.c b/app/test/test_compressdev.c
index a1b9f06250..c63b5b6737 100644
--- a/app/test/test_compressdev.c
+++ b/app/test/test_compressdev.c
@@ -2033,7 +2033,7 @@ test_deflate_comp_decomp(const struct interim_data_params *int_data,
 	test_priv_data.all_decomp_data = &all_decomp_data;
 	test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
-	test_priv_data.num_priv_xforms = 0; /* it's used for deompression only */
+	test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */

 	capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
 	if (capa == NULL) {
diff --git a/app/test/test_func_reentrancy.c b/app/test/test_func_reentrancy.c
index 838ab6f0f9..36e83bc587 100644
--- a/app/test/test_func_reentrancy.c
+++ b/app/test/test_func_reentrancy.c
@@ -253,7 +253,7 @@ hash_create_free(__rte_unused void *arg)
 		rte_atomic32_inc(&obj_count);
 	}

-	/* create mutiple times simultaneously */
+	/* create multiple times simultaneously */
 	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
 		hash_params.name = hash_name;
@@ -321,7 +321,7 @@ fbk_create_free(__rte_unused void *arg)
 		rte_atomic32_inc(&obj_count);
 	}

-	/* create mutiple fbk tables simultaneously */
+	/* create multiple fbk tables simultaneously */
 	for (i = 0; i < MAX_ITER_MULTI; i++) {
 		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
 		fbk_params.name = fbk_name;
@@ -387,7 +387,7 @@ lpm_create_free(__rte_unused void *arg)
 		rte_atomic32_inc(&obj_count);
 	}

-	/* create mutiple fbk tables simultaneously */
+	/* create multiple fbk tables simultaneously */
 	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
 		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
 		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
diff --git a/app/test/test_ipsec.c b/app/test/test_ipsec.c
index 1bec63b0e8..bc2a3dbc2e 100644
--- a/app/test/test_ipsec.c
+++ b/app/test/test_ipsec.c
@@ -653,7 +653,7 @@ create_crypto_session(struct ipsec_unitest_params *ut,
 	if (s == NULL)
 		return -ENOMEM;

-	/* initiliaze SA crypto session for device */
+	/* initialize SA crypto session for device */
 	rc = rte_cryptodev_sym_session_init(dev_id, s,
 			ut->crypto_xforms, qp->mp_session_private);
 	if (rc == 0) {
diff --git a/app/test/test_link_bonding.c b/app/test/test_link_bonding.c
index 8a9ef85178..dc6fc46b9c 100644
--- a/app/test/test_link_bonding.c
+++ b/app/test/test_link_bonding.c
@@ -3040,7 +3040,7 @@ test_balance_tx_burst_slave_tx_fail(void)
 	first_tx_fail_idx = TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
 			TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT;

-	/* copy mbuf referneces for expected transmission failures */
+	/* copy mbuf references for expected transmission failures */
 	for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++)
 		expected_fail_pkts[i] = pkts_burst_1[i + first_tx_fail_idx];

diff --git a/app/test/test_lpm.c b/app/test/test_lpm.c
index 556f5a67ba..37b460af3a 100644
--- a/app/test/test_lpm.c
+++ b/app/test/test_lpm.c
@@ -179,7 +179,7 @@ test3(void)
 	status = rte_lpm_add(NULL, ip, depth, next_hop);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -217,7 +217,7 @@ test4(void)
 	status = rte_lpm_delete(NULL, ip, depth);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -255,7 +255,7 @@ test5(void)
 	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

diff --git a/app/test/test_lpm6.c b/app/test/test_lpm6.c
index 0d664546fa..17221f992a 100644
--- a/app/test/test_lpm6.c
+++ b/app/test/test_lpm6.c
@@ -261,7 +261,7 @@ test4(void)
 	status = rte_lpm6_add(NULL, ip, depth, next_hop);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -299,7 +299,7 @@ test5(void)
 	status = rte_lpm6_delete(NULL, ip, depth);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -337,7 +337,7 @@ test6(void)
 	status = rte_lpm6_lookup(NULL, ip, &next_hop_return);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -375,7 +375,7 @@ test7(void)
 	status = rte_lpm6_lookup_bulk_func(NULL, ip, next_hop_return, 10);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -413,7 +413,7 @@ test8(void)
 	status = rte_lpm6_delete_bulk_func(NULL, ip, depth, 10);
 	TEST_LPM_ASSERT(status < 0);

-	/*Create vaild lpm to use in rest of test. */
+	/*Create valid lpm to use in rest of test. */
 	lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
 	TEST_LPM_ASSERT(lpm != NULL);

@@ -433,7 +433,7 @@ test8(void)
 /*
  * Call add, lookup and delete for a single rule with depth < 24.
  * Check all the combinations for the first three bytes that result in a hit.
- * Delete the rule and check that the same test returs a miss.
+ * Delete the rule and check that the same test returns a miss.
  */
 int32_t
 test9(void)
@@ -1738,7 +1738,7 @@ test27(void)
  * Call add, lookup and delete for a single rule with maximum 21bit next_hop
  * size.
  * Check that next_hop returned from lookup is equal to provisioned value.
- * Delete the rule and check that the same test returs a miss.
+ * Delete the rule and check that the same test returns a miss.
  */
 int32_t
 test28(void)
diff --git a/app/test/test_malloc.c b/app/test/test_malloc.c
index afff0de9f0..6d9249f831 100644
--- a/app/test/test_malloc.c
+++ b/app/test/test_malloc.c
@@ -603,7 +603,7 @@ test_realloc_numa(void)
 		}
 	}

-	/* Print warnign if only a single socket, but don't fail the test */
+	/* Print warning if only a single socket, but don't fail the test */
 	if (socket_count < 2)
 		printf("WARNING: realloc_socket test needs memory on multiple sockets!\n");

@@ -971,7 +971,7 @@ test_alloc_socket(void)
 		}
 	}

-	/* Print warnign if only a single socket, but don't fail the test */
+	/* Print warning if only a single socket, but don't fail the test */
 	if (socket_count < 2) {
 		printf("WARNING: alloc_socket test needs memory on multiple sockets!\n");
 	}
diff --git a/app/test/test_mbuf.c b/app/test/test_mbuf.c
index f93bcef8a9..75f3453946 100644
--- a/app/test/test_mbuf.c
+++ b/app/test/test_mbuf.c
@@ -1148,7 +1148,7 @@ test_refcnt_mbuf(void)

 	rte_eal_mp_wait_lcore();

-	/* check that we porcessed all references */
+	/* check that we processed all references */
 	tref = 0;
 	main_lcore = rte_get_main_lcore();

diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index a451608558..f6c650d11f 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -994,7 +994,7 @@ test_mempool(void)
 	if (test_mempool_basic_ex(mp_nocache) < 0)
 		GOTO_ERR(ret, err);

-	/* mempool operation test based on single producer and single comsumer */
+	/* mempool operation test based on single producer and single consumer */
 	if (test_mempool_sp_sc() < 0)
 		GOTO_ERR(ret, err);

diff --git a/app/test/test_mempool_perf.c b/app/test/test_mempool_perf.c
index 8f629736e8..87ad251367 100644
--- a/app/test/test_mempool_perf.c
+++ b/app/test/test_mempool_perf.c
@@ -88,7 +88,7 @@ static uint32_t synchro;
 static unsigned n_get_bulk;
 static unsigned n_put_bulk;

-/* number of objects retrived from mempool before putting them back */
+/* number of objects retrieved from mempool before putting them back */
 static unsigned n_keep;

 /* number of enqueues / dequeues */
diff --git a/app/test/test_meter.c b/app/test/test_meter.c
index f6fe6494ab..15d5a4839b 100644
--- a/app/test/test_meter.c
+++ b/app/test/test_meter.c
@@ -444,7 +444,7 @@ tm_test_srtcm_color_aware_check(void)
 	 * if using blind check
 	 */

-	/* previouly have a green, test points should keep unchanged */
+	/* previously have a green, test points should keep unchanged */
 	in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
 	out[0] = RTE_COLOR_GREEN;
 	out[1] = RTE_COLOR_YELLOW;
@@ -551,7 +551,7 @@ tm_test_trtcm_color_aware_check(void)
 	 * if using blind check
 	 */

-	/* previouly have a green, test points should keep unchanged */
+	/* previously have a green, test points should keep unchanged */
 	in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
 	out[0] = RTE_COLOR_GREEN;
 	out[1] = RTE_COLOR_YELLOW;
@@ -648,7 +648,7 @@ tm_test_trtcm_rfc4115_color_aware_check(void)
 	 * if using blind check
 	 */

-	/* previouly have a green, test points should keep unchanged */
+	/* previously have a green, test points should keep unchanged */
 	in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
 	out[0] = RTE_COLOR_GREEN;
 	out[1] = RTE_COLOR_YELLOW;
diff --git a/app/test/test_pmd_perf.c b/app/test/test_pmd_perf.c
index 1df86ce080..aac6c97ceb 100644
--- a/app/test/test_pmd_perf.c
+++ b/app/test/test_pmd_perf.c
@@ -752,7 +752,7 @@ test_pmd_perf(void)
 				"rte_eth_dev_start: err=%d, port=%d\n",
 				ret, portid);

-		/* always eanble promiscuous */
+		/* always enable promiscuous */
 		ret = rte_eth_promiscuous_enable(portid);
 		if (ret != 0)
 			rte_exit(EXIT_FAILURE,
diff --git a/app/test/test_timer.c b/app/test/test_timer.c
index a10b2fe9da..631b737d16 100644
--- a/app/test/test_timer.c
+++ b/app/test/test_timer.c
@@ -432,7 +432,7 @@ timer_basic_cb(struct rte_timer *tim, void *arg)
 		return;
 	}

-	/* Explicitelly stop timer 0. Once stop() called, we can even
+	/* Explicitly stop timer 0. Once stop() called, we can even
 	 * erase the content of the structure: it is not referenced
 	 * anymore by any code (in case of dynamic structure, it can
 	 * be freed) */
diff --git a/lib/acl/acl.h b/lib/acl/acl.h
index 4089ab2a04..f5739a475c 100644
--- a/lib/acl/acl.h
+++ b/lib/acl/acl.h
@@ -45,7 +45,7 @@ struct rte_acl_bitset {
  * Each transition is 64 bit value with the following format:
  * | node_type_specific : 32 | node_type : 3 | node_addr : 29 |
  * For all node types except RTE_ACL_NODE_MATCH, node_addr is an index
- * to the start of the node in the transtions array.
+ * to the start of the node in the transitions array.
  * Few different node types are used:
  * RTE_ACL_NODE_MATCH:
  * node_addr value is and index into an array that contains the return value
@@ -66,7 +66,7 @@ struct rte_acl_bitset {
  * RTE_ACL_NODE_SINGLE:
  * always transitions to the same node regardless of the input value.
  * RTE_ACL_NODE_DFA:
- * that node consits of up to 256 transitions.
+ * that node consists of up to 256 transitions.
  * In attempt to conserve space all transitions are divided into 4 consecutive
  * groups, by 64 transitions per group:
  * group64[i] contains transitions[i * 64, .. i * 64 + 63].
diff --git a/lib/acl/acl_bld.c b/lib/acl/acl_bld.c
index da10864cd8..f316d3e875 100644
--- a/lib/acl/acl_bld.c
+++ b/lib/acl/acl_bld.c
@@ -1494,7 +1494,7 @@ acl_set_data_indexes(struct rte_acl_ctx *ctx)
 /*
  * Internal routine, performs 'build' phase of trie generation:
  * - setups build context.
- * - analizes given set of rules.
+ * - analyzes given set of rules.
  * - builds internal tree(s).
  */
 static int
diff --git a/lib/acl/acl_run_avx2.h b/lib/acl/acl_run_avx2.h
index d06d2e8782..0b8967f22e 100644
--- a/lib/acl/acl_run_avx2.h
+++ b/lib/acl/acl_run_avx2.h
@@ -125,7 +125,7 @@ acl_process_matches_avx2x8(const struct rte_acl_ctx *ctx,
 	/* For each transition: put low 32 into tr_lo and high 32 into tr_hi */
 	ACL_TR_HILO(mm256, __m256, t0, t1, lo, hi);

-	/* Keep transitions wth NOMATCH intact. */
+	/* Keep transitions with NOMATCH intact. */
 	*tr_lo = _mm256_blendv_epi8(*tr_lo, lo, matches);
 	*tr_hi = _mm256_blendv_epi8(*tr_hi, hi, matches);
 }
diff --git a/lib/bbdev/rte_bbdev.c b/lib/bbdev/rte_bbdev.c
index b86c5fdcc0..7f353d4f7e 100644
--- a/lib/bbdev/rte_bbdev.c
+++ b/lib/bbdev/rte_bbdev.c
@@ -138,7 +138,7 @@ rte_bbdev_data_alloc(void)
 }

 /*
- * Find data alocated for the device or if not found return first unused bbdev
+ * Find data allocated for the device or if not found return first unused bbdev
  * data. If all structures are in use and none is used by the device return
  * NULL.
  */
diff --git a/lib/bpf/bpf_jit_x86.c b/lib/bpf/bpf_jit_x86.c
index aa22ea78a0..518513376a 100644
--- a/lib/bpf/bpf_jit_x86.c
+++ b/lib/bpf/bpf_jit_x86.c
@@ -1245,7 +1245,7 @@ emit_epilog(struct bpf_jit_state *st)
 	uint32_t i;
 	int32_t spil, ofs;

-	/* if we allready have an epilog generate a jump to it */
+	/* if we already have an epilog generate a jump to it */
 	if (st->exit.num++ != 0) {
 		emit_abs_jmp(st, st->exit.off);
 		return;
diff --git a/lib/bpf/bpf_load_elf.c b/lib/bpf/bpf_load_elf.c
index 2b11adeb5e..02a5d8ba0d 100644
--- a/lib/bpf/bpf_load_elf.c
+++ b/lib/bpf/bpf_load_elf.c
@@ -80,7 +80,7 @@ resolve_xsym(const char *sn, size_t ofs, struct ebpf_insn *ins, size_t ins_sz,
 	if (type == RTE_BPF_XTYPE_FUNC) {

 		/* we don't support multiple functions per BPF module,
-		 * so treat EBPF_PSEUDO_CALL to extrernal function
+		 * so treat EBPF_PSEUDO_CALL to external function
 		 * as an ordinary EBPF_CALL.
 		 */
 		if (ins[idx].src_reg == EBPF_PSEUDO_CALL) {
diff --git a/lib/bpf/bpf_pkt.c b/lib/bpf/bpf_pkt.c
index 08eebd99b3..af422afc07 100644
--- a/lib/bpf/bpf_pkt.c
+++ b/lib/bpf/bpf_pkt.c
@@ -166,7 +166,7 @@ bpf_eth_cbh_add(struct bpf_eth_cbh *cbh, uint16_t port, uint16_t queue)
 }

 /*
- * BPF packet processing routinies.
+ * BPF packet processing routines.
  */

 static inline uint32_t
diff --git a/lib/bpf/bpf_validate.c b/lib/bpf/bpf_validate.c
index 853279fee5..09331258eb 100644
--- a/lib/bpf/bpf_validate.c
+++ b/lib/bpf/bpf_validate.c
@@ -1730,7 +1730,7 @@ static const struct bpf_ins_check ins_chk[UINT8_MAX + 1] = {

 /*
  * make sure that instruction syntax is valid,
- * and it fields don't violate partciular instrcution type restrictions.
+ * and its fields don't violate particular instruction type restrictions.
  */
 static const char *
 check_syntax(const struct ebpf_insn *ins)
@@ -1961,7 +1961,7 @@ log_loop(const struct bpf_verifier *bvf)
  * First pass goes though all instructions in the set, checks that each
  * instruction is a valid one (correct syntax, valid field values, etc.)
  * and constructs control flow graph (CFG).
- * Then deapth-first search is performed over the constructed graph.
+ * Then depth-first search is performed over the constructed graph.
  * Programs with unreachable instructions and/or loops will be rejected.
  */
 static int
@@ -1988,7 +1988,7 @@ validate(struct bpf_verifier *bvf)

 		/*
 		 * construct CFG, jcc nodes have to outgoing edges,
-		 * 'exit' nodes - none, all others nodes have exaclty one
+		 * 'exit' nodes - none, all other nodes have exactly one
 		 * outgoing edge.
 		 */
 		switch (ins->code) {
@@ -2258,7 +2258,7 @@ evaluate(struct bpf_verifier *bvf)
 		idx = get_node_idx(bvf, node);
 		op = ins[idx].code;

-		/* for jcc node make a copy of evaluatoion state */
+		/* for jcc node make a copy of evaluation state */
 		if (node->nb_edge > 1)
 			rc |= save_eval_state(bvf, node);

diff --git a/lib/eal/include/rte_function_versioning.h b/lib/eal/include/rte_function_versioning.h
index 746a1e1992..eb6dd2bc17 100644
--- a/lib/eal/include/rte_function_versioning.h
+++ b/lib/eal/include/rte_function_versioning.h
@@ -15,7 +15,7 @@

 /*
  * Provides backwards compatibility when updating exported functions.
- * When a symol is exported from a library to provide an API, it also provides a
+ * When a symbol is exported from a library to provide an API, it also provides a
  * calling convention (ABI) that is embodied in its name, return type,
  * arguments, etc. On occasion that function may need to change to accommodate
  * new functionality, behavior, etc. When that occurs, it is desirable to
diff --git a/lib/eal/windows/include/fnmatch.h b/lib/eal/windows/include/fnmatch.h
index 142753c356..c272f65ccd 100644
--- a/lib/eal/windows/include/fnmatch.h
+++ b/lib/eal/windows/include/fnmatch.h
@@ -30,7 +30,7 @@ extern "C" {
  * with the given regular expression pattern.
  *
  * @param pattern
- *	regular expression notation decribing the pattern to match
+ *	regular expression notation describing the pattern to match
  *
  * @param string
  *	source string to searcg for the pattern
diff --git a/lib/eventdev/rte_event_timer_adapter.c b/lib/eventdev/rte_event_timer_adapter.c
index e5572e2add..9dad170b5a 100644
--- a/lib/eventdev/rte_event_timer_adapter.c
+++ b/lib/eventdev/rte_event_timer_adapter.c
@@ -525,7 +525,7 @@ event_buffer_flush(struct event_buffer *bufp, uint8_t dev_id, uint8_t port_id,

 	RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);

-	/* Determine the largest contigous run we can attempt to enqueue to the
+	/* Determine the largest contiguous run we can attempt to enqueue to the
 	 * event device.
 	 */
 	if (head_idx > tail_idx)
diff --git a/lib/hash/rte_thash.c b/lib/hash/rte_thash.c
index 394e1328c5..6847e36f4b 100644
--- a/lib/hash/rte_thash.c
+++ b/lib/hash/rte_thash.c
@@ -28,7 +28,7 @@ EAL_REGISTER_TAILQ(rte_thash_tailq)

 /**
  * Table of some irreducible polinomials over GF(2).
- * For lfsr they are reperesented in BE bit order, and
+ * For lfsr they are represented in BE bit order, and
  * x^0 is masked out.
  * For example, poly x^5 + x^2 + 1 will be represented
  * as (101001b & 11111b) = 01001b = 0x9
diff --git a/lib/ip_frag/rte_ipv4_reassembly.c b/lib/ip_frag/rte_ipv4_reassembly.c
index 69666c8b82..4a89a5f536 100644
--- a/lib/ip_frag/rte_ipv4_reassembly.c
+++ b/lib/ip_frag/rte_ipv4_reassembly.c
@@ -80,7 +80,7 @@ ipv4_frag_reassemble(struct ip_frag_pkt *fp)

 /*
  * Process new mbuf with fragment of IPV4 packet.
- * Incoming mbuf should have it's l2_len/l3_len fields setuped correclty.
+ * Incoming mbuf should have it's l2_len/l3_len fields setup correctly.
  * @param tbl
  *   Table where to lookup/add the fragmented packet.
  * @param mb
diff --git a/lib/ipsec/esp_inb.c b/lib/ipsec/esp_inb.c
index 99e9c43f2f..636c850fa6 100644
--- a/lib/ipsec/esp_inb.c
+++ b/lib/ipsec/esp_inb.c
@@ -475,7 +475,7 @@ trs_process_check(struct rte_mbuf *mb, struct rte_mbuf **ml,

 /*
  * packet checks for tunnel mode:
- * - same as for trasnport mode
+ * - same as for transport mode
  * - esp tail next proto contains expected for that SA value
  */
 static inline int32_t
@@ -561,7 +561,7 @@ trs_process_step3(struct rte_mbuf *mb)
 static inline void
 tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
 {
-	/* reset mbuf metatdata: L2/L3 len, packet type */
+	/* reset mbuf metadata: L2/L3 len, packet type */
 	mb->packet_type = RTE_PTYPE_UNKNOWN;
 	mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;

diff --git a/lib/ipsec/esp_outb.c b/lib/ipsec/esp_outb.c
index b7a70fd001..672e56aba0 100644
--- a/lib/ipsec/esp_outb.c
+++ b/lib/ipsec/esp_outb.c
@@ -614,7 +614,7 @@ cpu_outb_trs_pkt_prepare(const struct rte_ipsec_session *ss,

 /*
  * process outbound packets for SA with ESN support,
- * for algorithms that require SQN.hibits to be implictly included
+ * for algorithms that require SQN.hibits to be implicitly included
  * into digest computation.
  * In that case we have to move ICV bytes back to their proper place.
  */
diff --git a/lib/ipsec/ipsec_sad.c b/lib/ipsec/ipsec_sad.c
index 3f9533c80a..531e1e323c 100644
--- a/lib/ipsec/ipsec_sad.c
+++ b/lib/ipsec/ipsec_sad.c
@@ -62,7 +62,7 @@ EAL_REGISTER_TAILQ(rte_ipsec_sad_tailq)
  * Inserts a rule into an appropriate hash table,
  * updates the value for a given SPI in SPI_ONLY hash table
  * reflecting presence of more specific rule type in two LSBs.
- * Updates a counter that reflects the number of rules whith the same SPI.
+ * Updates a counter that reflects the number of rules with the same SPI.
  */
 static inline int
 add_specific(struct rte_ipsec_sad *sad, const void *key,
diff --git a/lib/ipsec/sa.c b/lib/ipsec/sa.c
index a19819f9f1..1e51482c92 100644
--- a/lib/ipsec/sa.c
+++ b/lib/ipsec/sa.c
@@ -136,7 +136,7 @@ ipsec_sa_size(uint64_t type, uint32_t *wnd_sz, uint32_t *nb_bucket)

 	/*
 	 * RFC 4303 recommends 64 as minimum window size.
 	 * there is no point to use ESN mode without SQN window,
-	 * so make sure we have at least 64 window when ESN is enalbed.
+	 * so make sure we have at least 64 window when ESN is enabled.
 	 */
 	wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) == RTE_IPSEC_SATP_ESN_DISABLE) ?
diff --git a/lib/ipsec/sa.h b/lib/ipsec/sa.h
index 6e59f18e16..7503587b50 100644
--- a/lib/ipsec/sa.h
+++ b/lib/ipsec/sa.h
@@ -122,7 +122,7 @@ struct rte_ipsec_sa {
 	 * In case of SA handled by multiple threads *sqn* cacheline
 	 * could be shared by multiple cores.
 	 * To minimise performance impact, we try to locate in a separate
-	 * place from other frequently accesed data.
+	 * place from other frequently accessed data.
 	 */
 	union {
 		uint64_t outb;
diff --git a/lib/net/rte_gtp.h b/lib/net/rte_gtp.h
index 9f6deb9c7e..dca940c2c5 100644
--- a/lib/net/rte_gtp.h
+++ b/lib/net/rte_gtp.h
@@ -142,9 +142,9 @@ struct rte_gtp_psc_type1_hdr {
 /** GTP header length */
 #define RTE_ETHER_GTP_HLEN \
 	(sizeof(struct rte_udp_hdr) + sizeof(struct rte_gtp_hdr))
-/* GTP next protocal type */
-#define RTE_GTP_TYPE_IPV4 0x40 /**< GTP next protocal type IPv4 */
-#define RTE_GTP_TYPE_IPV6 0x60 /**< GTP next protocal type IPv6 */
+/* GTP next protocol type */
+#define RTE_GTP_TYPE_IPV4 0x40 /**< GTP next protocol type IPv4 */
+#define RTE_GTP_TYPE_IPV6 0x60 /**< GTP next protocol type IPv6 */
 /* GTP destination port number */
 #define RTE_GTPC_UDP_PORT 2123 /**< GTP-C UDP destination port */
 #define RTE_GTPU_UDP_PORT 2152 /**< GTP-U UDP destination port */
diff --git a/lib/node/ethdev_rx_priv.h b/lib/node/ethdev_rx_priv.h
index 21dcba51fc..7f24cf962e 100644
--- a/lib/node/ethdev_rx_priv.h
+++ b/lib/node/ethdev_rx_priv.h
@@ -67,7 +67,7 @@ struct ethdev_rx_node_main *ethdev_rx_get_node_data_get(void);
  *
  * Get the Ethernet Rx node.
  *
- * @retrun
+ * @return
  *   Pointer to the Ethernet Rx node.
  */
 struct rte_node_register *ethdev_rx_node_get(void);
diff --git a/lib/node/ethdev_tx_priv.h b/lib/node/ethdev_tx_priv.h
index e3a6fdb8af..93744df56a 100644
--- a/lib/node/ethdev_tx_priv.h
+++ b/lib/node/ethdev_tx_priv.h
@@ -46,7 +46,7 @@ struct ethdev_tx_node_main *ethdev_tx_node_data_get(void);
  *
  * Get the Ethernet Tx node.
  *
- * @retrun
+ * @return
  *   Pointer to the Ethernet Tx node.
  */
 struct rte_node_register *ethdev_tx_node_get(void);
diff --git a/lib/node/ip4_rewrite_priv.h b/lib/node/ip4_rewrite_priv.h
index a1fb8668c5..5105ec1d29 100644
--- a/lib/node/ip4_rewrite_priv.h
+++ b/lib/node/ip4_rewrite_priv.h
@@ -49,7 +49,7 @@ struct ip4_rewrite_node_main {
  *
  * Get the ipv4 rewrite node.
  *
- * @retrun
+ * @return
  *   Pointer to the ipv4 rewrite node.
  */
 struct rte_node_register *ip4_rewrite_node_get(void);
diff --git a/lib/pipeline/rte_swx_pipeline.h b/lib/pipeline/rte_swx_pipeline.h
index 9c3d08199c..77141bd341 100644
--- a/lib/pipeline/rte_swx_pipeline.h
+++ b/lib/pipeline/rte_swx_pipeline.h
@@ -647,7 +647,7 @@ struct rte_swx_pipeline_selector_params {
 	const char *group_id_field_name;

 	/** The set of fields used to select (through a hashing scheme) the
-	 * member within the current group. Inputs into the seletion operation.
+	 * member within the current group. Inputs into the selection operation.
 	 * Restriction: All the selector fields must be part of the same struct,
 	 * i.e. part of the same header or part of the meta-data structure.
 	 */
diff --git a/lib/power/power_acpi_cpufreq.c b/lib/power/power_acpi_cpufreq.c
index 1e8aeb8403..402ed8c99b 100644
--- a/lib/power/power_acpi_cpufreq.c
+++ b/lib/power/power_acpi_cpufreq.c
@@ -159,7 +159,7 @@ power_get_available_freqs(struct acpi_power_info *pi)
 		goto out;
 	}

-	/* Store the available frequncies into power context */
+	/* Store the available frequencies into power context */
 	for (i = 0, pi->nb_freqs = 0; i < count; i++) {
 		POWER_DEBUG_TRACE("Lcore %u frequency[%d]: %s\n", pi->lcore_id,
 				i, freqs[i]);
diff --git a/lib/rcu/rte_rcu_qsbr.h b/lib/rcu/rte_rcu_qsbr.h
index 18811c1cc1..62a420a785 100644
--- a/lib/rcu/rte_rcu_qsbr.h
+++ b/lib/rcu/rte_rcu_qsbr.h
@@ -362,7 +362,7 @@ rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)

 	/* The reader can go offline only after the load of the
 	 * data structure is completed. i.e. any load of the
-	 * data strcture can not move after this store.
+	 * data structure can not move after this store.
 	 */

 	__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
diff --git a/lib/rib/rte_rib6.c b/lib/rib/rte_rib6.c
index b00c7fbf5a..70405113b4 100644
--- a/lib/rib/rte_rib6.c
+++ b/lib/rib/rte_rib6.c
@@ -199,7 +199,7 @@ rte_rib6_lookup_exact(struct rte_rib6 *rib,
 }

 /*
- * Traverses on subtree and retreeves more specific routes
+ * Traverses on subtree and retrieves more specific routes
  * for a given in args ip/depth prefix
  * last = NULL means the first invocation
  */
diff --git a/lib/sched/rte_sched.c b/lib/sched/rte_sched.c
index 2fe32bbd33..ed44808f7b 100644
--- a/lib/sched/rte_sched.c
+++ b/lib/sched/rte_sched.c
@@ -590,7 +590,7 @@ rte_sched_subport_config_qsize(struct rte_sched_subport *subport)

 	subport->qsize_add[0] = 0;

-	/* Strict prority traffic class */
+	/* Strict priority traffic class */
 	for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
 		subport->qsize_add[i] = subport->qsize_add[i-1] +
 			subport->qsize[i-1];
diff --git a/lib/vhost/rte_vhost.h b/lib/vhost/rte_vhost.h
index af0afbcf60..b454c05868 100644
--- a/lib/vhost/rte_vhost.h
+++ b/lib/vhost/rte_vhost.h
@@ -796,7 +796,7 @@ rte_vhost_get_vhost_ring_inflight(int vid, uint16_t vring_idx,
 /**
  * Set split inflight descriptor.
  *
- * This function save descriptors that has been comsumed in available
+ * This function save descriptors that has been consumed in available
  * ring
  *
  * @param vid
@@ -815,7 +815,7 @@ rte_vhost_set_inflight_desc_split(int vid, uint16_t vring_idx,
 /**
  * Set packed inflight descriptor and get corresponding inflight entry
  *
- * This function save descriptors that has been comsumed
+ * This function save descriptors that has been consumed
  *
  * @param vid
  *   vhost device ID
-- 
2.20.1