Fix spelling errors found by codespell in comments, including doxygen comments.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Vladimir Medvedkin <vladimir.medvedkin@intel.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Acked-by: Ray Kinsella <mdr@ashroe.eu>
Acked-by: Cristian Dumitrescu <cristian.dumitrescu@intel.com>
Acked-by: Chenbo Xia <chenbo.xia@intel.com>
return nb_item;
/*
- * Then, check that all values in the list are differents.
+ * Then, check that all values in the list are different.
* No optimization here...
*/
for (i = 0; i < nb_item; i++) {
}
/*
- * Displays the RSS hash functions of a port, and, optionaly, the RSS hash
+ * Displays the RSS hash functions of a port, and, optionally, the RSS hash
* key of the port.
*/
void
{
port->mc_addr_nb--;
if (addr_idx == port->mc_addr_nb) {
- /* No need to recompact the set of multicast addressses. */
+ /* No need to recompact the set of multicast addresses. */
if (port->mc_addr_nb == 0) {
/* free the pool of multicast addresses. */
free(port->mc_addr_pool);
default:
break;
}
- return "Unkwown ARP op";
+ return "Unknown ARP op";
}
static const char *
*
* - Invoke ``test_atomic_exchange`` on each lcore. Before doing
* anything else, the cores wait for a synchronization event.
- * Each core then does the follwoing for N iterations:
+ * Each core then does the following for N iterations:
*
* Generate a new token with a data integrity check
* Exchange the new token for previously generated token
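A minimal sketch of the loop described above, assuming nothing beyond the comment itself: the token layout, the make_token()/check_token() helpers and the iteration count are hypothetical, only the pattern (atomically swap a self-checking token) comes from the text.

#include <stdint.h>
#include <stdlib.h>

static uint64_t token;	/* slot shared by all lcores */

/* pack a 48-bit payload with a 16-bit integrity check in the top bits */
static uint64_t
make_token(uint64_t seq)
{
	uint64_t data = seq & 0xffffffffffffULL;
	uint16_t sum = (uint16_t)(data ^ (data >> 16) ^ (data >> 32));

	return data | ((uint64_t)sum << 48);
}

static int
check_token(uint64_t t)
{
	return make_token(t) == t;
}

static void
exchange_loop(uint64_t iterations)
{
	uint64_t i, old;

	for (i = 0; i != iterations; i++) {
		/* atomically publish a new token, retrieve the previous one */
		old = __atomic_exchange_n(&token, make_token(i),
				__ATOMIC_RELAXED);
		/* a torn value means the exchange was not atomic */
		if (!check_token(old))
			abort();
	}
}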
struct lcore_plock_test {
struct plock_test *pt[2]; /* shared, lock-protected data */
uint64_t sum[2]; /* local copy of the shared data */
- uint64_t iter; /* number of iterations to perfom */
+ uint64_t iter; /* number of iterations to perform */
uint32_t lc; /* given lcore id */
};
},
};
-/* String comparision impelementation, return 0 if equal else difference */
+/* String comparison implementation, return 0 if equal else difference */
static uint32_t
dummy_func5(const char *s1, const char *s2)
{
test_priv_data.all_decomp_data = &all_decomp_data;
test_priv_data.decomp_produced_data_size = &decomp_produced_data_size;
- test_priv_data.num_priv_xforms = 0; /* it's used for deompression only */
+ test_priv_data.num_priv_xforms = 0; /* it's used for decompression only */
capa = rte_compressdev_capability_get(0, RTE_COMP_ALGO_DEFLATE);
if (capa == NULL) {
rte_atomic32_inc(&obj_count);
}
- /* create mutiple times simultaneously */
+ /* create multiple times simultaneously */
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
hash_params.name = hash_name;
rte_atomic32_inc(&obj_count);
}
- /* create mutiple fbk tables simultaneously */
+ /* create multiple fbk tables simultaneously */
for (i = 0; i < MAX_ITER_MULTI; i++) {
snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
fbk_params.name = fbk_name;
rte_atomic32_inc(&obj_count);
}
- /* create mutiple fbk tables simultaneously */
+ /* create multiple fbk tables simultaneously */
for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
if (s == NULL)
return -ENOMEM;
- /* initiliaze SA crypto session for device */
+ /* initialize SA crypto session for device */
rc = rte_cryptodev_sym_session_init(dev_id, s,
ut->crypto_xforms, qp->mp_session_private);
if (rc == 0) {
first_tx_fail_idx = TEST_BAL_SLAVE_TX_FAIL_BURST_SIZE_1 -
TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT;
- /* copy mbuf referneces for expected transmission failures */
+ /* copy mbuf references for expected transmission failures */
for (i = 0; i < TEST_BAL_SLAVE_TX_FAIL_PACKETS_COUNT; i++)
expected_fail_pkts[i] = pkts_burst_1[i + first_tx_fail_idx];
status = rte_lpm_add(NULL, ip, depth, next_hop);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm_delete(NULL, ip, depth);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm_lookup(NULL, ip, &next_hop_return);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm6_add(NULL, ip, depth, next_hop);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm6_delete(NULL, ip, depth);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm6_lookup(NULL, ip, &next_hop_return);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm6_lookup_bulk_func(NULL, ip, next_hop_return, 10);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
status = rte_lpm6_delete_bulk_func(NULL, ip, depth, 10);
TEST_LPM_ASSERT(status < 0);
- /*Create vaild lpm to use in rest of test. */
+ /*Create valid lpm to use in rest of test. */
lpm = rte_lpm6_create(__func__, SOCKET_ID_ANY, &config);
TEST_LPM_ASSERT(lpm != NULL);
/*
* Call add, lookup and delete for a single rule with depth < 24.
* Check all the combinations for the first three bytes that result in a hit.
- * Delete the rule and check that the same test returs a miss.
+ * Delete the rule and check that the same test returns a miss.
*/
int32_t
test9(void)
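For readers unfamiliar with the API being exercised, a hedged sketch of the add/lookup/delete cycle the comment describes; the prefix, depth and next-hop values are made up, only the rte_lpm_add/lookup/delete calls themselves are the library's.

#include <rte_lpm.h>

static int
lpm_add_lookup_delete(struct rte_lpm *lpm)
{
	uint32_t ip = 0xc0000200;	/* 192.0.2.0, host byte order */
	uint32_t next_hop = 5, found;
	uint8_t depth = 16;		/* depth < 24 */

	if (rte_lpm_add(lpm, ip, depth, next_hop) < 0)
		return -1;

	/* any address sharing the first 16 bits must hit the rule */
	if (rte_lpm_lookup(lpm, ip | 0xff, &found) != 0 || found != next_hop)
		return -1;

	if (rte_lpm_delete(lpm, ip, depth) < 0)
		return -1;

	/* after deletion the same lookup must miss */
	return rte_lpm_lookup(lpm, ip | 0xff, &found) == 0 ? -1 : 0;
}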
* Call add, lookup and delete for a single rule with maximum 21bit next_hop
* size.
* Check that next_hop returned from lookup is equal to provisioned value.
- * Delete the rule and check that the same test returs a miss.
+ * Delete the rule and check that the same test returns a miss.
*/
int32_t
test28(void)
}
}
- /* Print warnign if only a single socket, but don't fail the test */
+ /* Print warning if only a single socket, but don't fail the test */
if (socket_count < 2)
printf("WARNING: realloc_socket test needs memory on multiple sockets!\n");
}
}
- /* Print warnign if only a single socket, but don't fail the test */
+ /* Print warning if only a single socket, but don't fail the test */
if (socket_count < 2) {
printf("WARNING: alloc_socket test needs memory on multiple sockets!\n");
}
rte_eal_mp_wait_lcore();
- /* check that we porcessed all references */
+ /* check that we processed all references */
tref = 0;
main_lcore = rte_get_main_lcore();
if (test_mempool_basic_ex(mp_nocache) < 0)
GOTO_ERR(ret, err);
- /* mempool operation test based on single producer and single comsumer */
+ /* mempool operation test based on single producer and single consumer */
if (test_mempool_sp_sc() < 0)
GOTO_ERR(ret, err);
static unsigned n_get_bulk;
static unsigned n_put_bulk;
-/* number of objects retrived from mempool before putting them back */
+/* number of objects retrieved from mempool before putting them back */
static unsigned n_keep;
/* number of enqueues / dequeues */
* if using blind check
*/
- /* previouly have a green, test points should keep unchanged */
+ /* previously have a green, test points should keep unchanged */
in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
out[0] = RTE_COLOR_GREEN;
out[1] = RTE_COLOR_YELLOW;
* if using blind check
*/
- /* previouly have a green, test points should keep unchanged */
+ /* previously have a green, test points should keep unchanged */
in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
out[0] = RTE_COLOR_GREEN;
out[1] = RTE_COLOR_YELLOW;
* if using blind check
*/
- /* previouly have a green, test points should keep unchanged */
+ /* previously have a green, test points should keep unchanged */
in[0] = in[1] = in[2] = in[3] = RTE_COLOR_GREEN;
out[0] = RTE_COLOR_GREEN;
out[1] = RTE_COLOR_YELLOW;
"rte_eth_dev_start: err=%d, port=%d\n",
ret, portid);
- /* always eanble promiscuous */
+ /* always enable promiscuous */
ret = rte_eth_promiscuous_enable(portid);
if (ret != 0)
rte_exit(EXIT_FAILURE,
return;
}
- /* Explicitelly stop timer 0. Once stop() called, we can even
+ /* Explicitly stop timer 0. Once stop() called, we can even
* erase the content of the structure: it is not referenced
* anymore by any code (in case of dynamic structure, it can
* be freed) */
* Each transition is 64 bit value with the following format:
* | node_type_specific : 32 | node_type : 3 | node_addr : 29 |
* For all node types except RTE_ACL_NODE_MATCH, node_addr is an index
- * to the start of the node in the transtions array.
+ * to the start of the node in the transitions array.
* Few different node types are used:
* RTE_ACL_NODE_MATCH:
* node_addr value is and index into an array that contains the return value
* RTE_ACL_NODE_SINGLE:
* always transitions to the same node regardless of the input value.
* RTE_ACL_NODE_DFA:
- * that node consits of up to 256 transitions.
+ * that node consists of up to 256 transitions.
* In attempt to conserve space all transitions are divided into 4 consecutive
* groups, by 64 transitions per group:
* group64[i] contains transitions[i * 64, .. i * 64 + 63].
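The bit layout quoted above maps naturally onto a couple of helpers; the names below are hypothetical (they are not the librte_acl internals), only the field widths and the 4 x 64 grouping come from the comment.

#include <stdint.h>

#define NODE_ADDR_BITS	29
#define NODE_TYPE_BITS	3

/* low 29 bits: index of the node in the transitions array */
static inline uint32_t
trans_node_addr(uint64_t tr)
{
	return (uint32_t)(tr & ((UINT64_C(1) << NODE_ADDR_BITS) - 1));
}

/* next 3 bits: node type (MATCH, SINGLE, DFA, ...) */
static inline uint32_t
trans_node_type(uint64_t tr)
{
	return (uint32_t)((tr >> NODE_ADDR_BITS) &
		((UINT64_C(1) << NODE_TYPE_BITS) - 1));
}

/* top 32 bits: node-type specific data */
static inline uint32_t
trans_node_data(uint64_t tr)
{
	return (uint32_t)(tr >> 32);
}

/* for a DFA node, input byte 'c' falls into group64[c / 64] */
static inline uint32_t
dfa_group(uint8_t c)
{
	return c >> 6;
}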
/*
* Internal routine, performs 'build' phase of trie generation:
* - setups build context.
- * - analizes given set of rules.
+ * - analyzes given set of rules.
* - builds internal tree(s).
*/
static int
/* For each transition: put low 32 into tr_lo and high 32 into tr_hi */
ACL_TR_HILO(mm256, __m256, t0, t1, lo, hi);
- /* Keep transitions wth NOMATCH intact. */
+ /* Keep transitions with NOMATCH intact. */
*tr_lo = _mm256_blendv_epi8(*tr_lo, lo, matches);
*tr_hi = _mm256_blendv_epi8(*tr_hi, hi, matches);
}
}
/*
- * Find data alocated for the device or if not found return first unused bbdev
+ * Find data allocated for the device or if not found return first unused bbdev
* data. If all structures are in use and none is used by the device return
* NULL.
*/
uint32_t i;
int32_t spil, ofs;
- /* if we allready have an epilog generate a jump to it */
+ /* if we already have an epilog generate a jump to it */
if (st->exit.num++ != 0) {
emit_abs_jmp(st, st->exit.off);
return;
if (type == RTE_BPF_XTYPE_FUNC) {
/* we don't support multiple functions per BPF module,
- * so treat EBPF_PSEUDO_CALL to extrernal function
+ * so treat EBPF_PSEUDO_CALL to external function
* as an ordinary EBPF_CALL.
*/
if (ins[idx].src_reg == EBPF_PSEUDO_CALL) {
}
/*
- * BPF packet processing routinies.
+ * BPF packet processing routines.
*/
static inline uint32_t
/*
* make sure that instruction syntax is valid,
- * and it fields don't violate partciular instrcution type restrictions.
+ * and its fields don't violate particular instruction type restrictions.
*/
static const char *
check_syntax(const struct ebpf_insn *ins)
* First pass goes though all instructions in the set, checks that each
* instruction is a valid one (correct syntax, valid field values, etc.)
* and constructs control flow graph (CFG).
- * Then deapth-first search is performed over the constructed graph.
+ * Then depth-first search is performed over the constructed graph.
* Programs with unreachable instructions and/or loops will be rejected.
*/
static int
/*
* construct CFG, jcc nodes have to outgoing edges,
- * 'exit' nodes - none, all others nodes have exaclty one
+ * 'exit' nodes - none, all other nodes have exactly one
* outgoing edge.
*/
switch (ins->code) {
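A toy illustration of the validation scheme sketched in these comments; it is not the librte_bpf verifier, just a depth-first search over a hypothetical CFG where 'jcc' nodes have two outgoing edges, 'exit' nodes none and every other node exactly one, rejecting loops (back-edges) and unreachable nodes.

#include <stdint.h>
#include <string.h>

#define MAX_NODES	64
enum { WHITE, GREY, BLACK };

struct cfg {
	uint32_t nb_edge[MAX_NODES];	/* 0 for exit, 1 default, 2 for jcc */
	uint32_t edge[MAX_NODES][2];
	uint32_t nb_node;
};

static int
dfs(const struct cfg *g, uint32_t n, uint8_t *color)
{
	uint32_t i, t;

	color[n] = GREY;
	for (i = 0; i != g->nb_edge[n]; i++) {
		t = g->edge[n][i];
		if (color[t] == GREY)
			return -1;	/* back-edge: program contains a loop */
		if (color[t] == WHITE && dfs(g, t, color) != 0)
			return -1;
	}
	color[n] = BLACK;
	return 0;
}

static int
validate_cfg(const struct cfg *g)
{
	uint8_t color[MAX_NODES];
	uint32_t i;

	memset(color, WHITE, sizeof(color));
	if (dfs(g, 0, color) != 0)
		return -1;
	for (i = 0; i != g->nb_node; i++)
		if (color[i] != BLACK)
			return -1;	/* unreachable instruction */
	return 0;
}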
idx = get_node_idx(bvf, node);
op = ins[idx].code;
- /* for jcc node make a copy of evaluatoion state */
+ /* for jcc node make a copy of evaluation state */
if (node->nb_edge > 1)
rc |= save_eval_state(bvf, node);
/*
* Provides backwards compatibility when updating exported functions.
- * When a symol is exported from a library to provide an API, it also provides a
+ * When a symbol is exported from a library to provide an API, it also provides a
* calling convention (ABI) that is embodied in its name, return type,
* arguments, etc. On occasion that function may need to change to accommodate
* new functionality, behavior, etc. When that occurs, it is desirable to
* with the given regular expression pattern.
*
* @param pattern
- * regular expression notation decribing the pattern to match
+ * regular expression notation describing the pattern to match
*
* @param string
* source string to searcg for the pattern
RTE_ASSERT(head_idx < EVENT_BUFFER_SZ && tail_idx < EVENT_BUFFER_SZ);
- /* Determine the largest contigous run we can attempt to enqueue to the
+ /* Determine the largest contiguous run we can attempt to enqueue to the
* event device.
*/
if (head_idx > tail_idx)
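The hypothetical helper below spells out the computation the comment refers to, assuming entries occupy [tail_idx, head_idx) of a circular buffer of buf_sz slots and that the caller has already checked the buffer is not empty; none of the names are the adapter's own.

#include <stdint.h>

static inline uint16_t
contig_run(uint16_t head_idx, uint16_t tail_idx, uint16_t buf_sz)
{
	if (head_idx > tail_idx)
		return head_idx - tail_idx;	/* run does not wrap */
	return buf_sz - tail_idx;		/* run up to the end of the buffer */
}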
/**
* Table of some irreducible polinomials over GF(2).
- * For lfsr they are reperesented in BE bit order, and
+ * For lfsr they are represented in BE bit order, and
* x^0 is masked out.
* For example, poly x^5 + x^2 + 1 will be represented
* as (101001b & 11111b) = 01001b = 0x9
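A hypothetical helper that reproduces the representation spelled out in the comment: reverse the degree+1 coefficient bits and mask out the x^0 term, so lfsr_poly_repr(0x25, 5) for x^5 + x^2 + 1 yields 0x9.

#include <stdint.h>

static uint32_t
lfsr_poly_repr(uint32_t poly, uint32_t degree)
{
	uint32_t i, rev = 0;

	/* reverse the coefficient bits x^degree .. x^0 (BE bit order) */
	for (i = 0; i <= degree; i++)
		if (poly & (UINT32_C(1) << i))
			rev |= UINT32_C(1) << (degree - i);

	/* mask out the x^0 term */
	return rev & ((UINT32_C(1) << degree) - 1);
}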
/*
* Process new mbuf with fragment of IPV4 packet.
- * Incoming mbuf should have it's l2_len/l3_len fields setuped correclty.
+ * Incoming mbuf should have its l2_len/l3_len fields set up correctly.
* @param tbl
* Table where to lookup/add the fragmented packet.
* @param mb
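A hedged usage sketch of what setting l2_len/l3_len correctly means for the caller; the wrapper below and the assumption of a plain Ethernet + IPv4 fragment are hypothetical, only the mbuf fields and rte_ipv4_frag_reassemble_packet() are the library's.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_ip_frag.h>
#include <rte_mbuf.h>

static struct rte_mbuf *
reassemble_one(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
	struct rte_mbuf *mb, uint64_t tms)
{
	struct rte_ipv4_hdr *ip_hdr;

	/* fragment is assumed to carry an Ethernet header followed by IPv4 */
	mb->l2_len = sizeof(struct rte_ether_hdr);
	ip_hdr = rte_pktmbuf_mtod_offset(mb, struct rte_ipv4_hdr *, mb->l2_len);
	mb->l3_len = (ip_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
		RTE_IPV4_IHL_MULTIPLIER;

	return rte_ipv4_frag_reassemble_packet(tbl, dr, mb, tms, ip_hdr);
}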
/*
* packet checks for tunnel mode:
- * - same as for trasnport mode
+ * - same as for transport mode
* - esp tail next proto contains expected for that SA value
*/
static inline int32_t
static inline void
tun_process_step3(struct rte_mbuf *mb, uint64_t txof_msk, uint64_t txof_val)
{
- /* reset mbuf metatdata: L2/L3 len, packet type */
+ /* reset mbuf metadata: L2/L3 len, packet type */
mb->packet_type = RTE_PTYPE_UNKNOWN;
mb->tx_offload = (mb->tx_offload & txof_msk) | txof_val;
/*
* process outbound packets for SA with ESN support,
- * for algorithms that require SQN.hibits to be implictly included
+ * for algorithms that require SQN.hibits to be implicitly included
* into digest computation.
* In that case we have to move ICV bytes back to their proper place.
*/
* Inserts a rule into an appropriate hash table,
* updates the value for a given SPI in SPI_ONLY hash table
* reflecting presence of more specific rule type in two LSBs.
- * Updates a counter that reflects the number of rules whith the same SPI.
+ * Updates a counter that reflects the number of rules with the same SPI.
*/
static inline int
add_specific(struct rte_ipsec_sad *sad, const void *key,
/*
* RFC 4303 recommends 64 as minimum window size.
* there is no point to use ESN mode without SQN window,
- * so make sure we have at least 64 window when ESN is enalbed.
+ * so make sure we have at least 64 window when ESN is enabled.
*/
wsz = ((type & RTE_IPSEC_SATP_ESN_MASK) ==
RTE_IPSEC_SATP_ESN_DISABLE) ?
* In case of SA handled by multiple threads *sqn* cacheline
* could be shared by multiple cores.
* To minimise performance impact, we try to locate in a separate
- * place from other frequently accesed data.
+ * place from other frequently accessed data.
*/
union {
uint64_t outb;
/** GTP header length */
#define RTE_ETHER_GTP_HLEN \
(sizeof(struct rte_udp_hdr) + sizeof(struct rte_gtp_hdr))
-/* GTP next protocal type */
-#define RTE_GTP_TYPE_IPV4 0x40 /**< GTP next protocal type IPv4 */
-#define RTE_GTP_TYPE_IPV6 0x60 /**< GTP next protocal type IPv6 */
+/* GTP next protocol type */
+#define RTE_GTP_TYPE_IPV4 0x40 /**< GTP next protocol type IPv4 */
+#define RTE_GTP_TYPE_IPV6 0x60 /**< GTP next protocol type IPv6 */
/* GTP destination port number */
#define RTE_GTPC_UDP_PORT 2123 /**< GTP-C UDP destination port */
#define RTE_GTPU_UDP_PORT 2152 /**< GTP-U UDP destination port */
*
* Get the Ethernet Rx node.
*
- * @retrun
+ * @return
* Pointer to the Ethernet Rx node.
*/
struct rte_node_register *ethdev_rx_node_get(void);
*
* Get the Ethernet Tx node.
*
- * @retrun
+ * @return
* Pointer to the Ethernet Tx node.
*/
struct rte_node_register *ethdev_tx_node_get(void);
*
* Get the ipv4 rewrite node.
*
- * @retrun
+ * @return
* Pointer to the ipv4 rewrite node.
*/
struct rte_node_register *ip4_rewrite_node_get(void);
const char *group_id_field_name;
/** The set of fields used to select (through a hashing scheme) the
- * member within the current group. Inputs into the seletion operation.
+ * member within the current group. Inputs into the selection operation.
* Restriction: All the selector fields must be part of the same struct,
* i.e. part of the same header or part of the meta-data structure.
*/
goto out;
}
- /* Store the available frequncies into power context */
+ /* Store the available frequencies into power context */
for (i = 0, pi->nb_freqs = 0; i < count; i++) {
POWER_DEBUG_TRACE("Lcore %u frequency[%d]: %s\n", pi->lcore_id,
i, freqs[i]);
/* The reader can go offline only after the load of the
* data structure is completed. i.e. any load of the
- * data strcture can not move after this store.
+ * data structure can not move after this store.
*/
__atomic_store_n(&v->qsbr_cnt[thread_id].cnt,
}
/*
- * Traverses on subtree and retreeves more specific routes
+ * Traverses on subtree and retrieves more specific routes
* for a given in args ip/depth prefix
* last = NULL means the first invocation
*/
subport->qsize_add[0] = 0;
- /* Strict prority traffic class */
+ /* Strict priority traffic class */
for (i = 1; i < RTE_SCHED_TRAFFIC_CLASSES_PER_PIPE; i++)
subport->qsize_add[i] = subport->qsize_add[i-1] + subport->qsize[i-1];
/**
* Set split inflight descriptor.
*
- * This function save descriptors that has been comsumed in available
+ * This function saves descriptors that have been consumed in the available
* ring
*
* @param vid
/**
* Set packed inflight descriptor and get corresponding inflight entry
*
- * This function save descriptors that has been comsumed
+ * This function saves descriptors that have been consumed
*
* @param vid
* vhost device ID