names = rte_malloc(NULL, sizeof(struct rte_metric_name) * len, 0);
if (names == NULL) {
- printf("Cannot allocate memory for metrcis names\n");
+ printf("Cannot allocate memory for metrics names\n");
rte_free(metrics);
return;
}
caplevel.n_nodes_max,
caplevel.n_nodes_nonleaf_max,
caplevel.n_nodes_leaf_max);
- printf("\t -- indetical: non leaf %u leaf %u\n",
+ printf("\t -- identical: non leaf %u leaf %u\n",
caplevel.non_leaf_nodes_identical,
caplevel.leaf_nodes_identical);
printf(" - Name (%s) on socket (%d)\n"
" - flags:\n"
"\t -- Single Producer Enqueue (%u)\n"
- "\t -- Single Consmer Dequeue (%u)\n",
+ "\t -- Single Consumer Dequeue (%u)\n",
ptr->name,
ptr->memzone->socket_id,
ptr->flags & RING_F_SP_ENQ,
}
/*
- * Parses IPV6 address, exepcts the following format:
- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
+ * Parse IPv6 address, expects the following format:
+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X is a hexadecimal digit).
*/
static int
parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
"should be either 1 or multiple of %zu, "
"but not greater then %u]\n"
"[--" OPT_MAX_SIZE
- "=<size limit (in bytes) for runtime ACL strucutures> "
+ "=<size limit (in bytes) for runtime ACL structures> "
"leave 0 for default behaviour]\n"
"[--" OPT_ITER_NUM "=<number of iterations to perform>]\n"
"[--" OPT_VERBOSE "=<verbose level>]\n"
if (ops == NULL) {
RTE_LOG(ERR, USER1,
- "Can't allocate memory for ops strucures\n");
+ "Can't allocate memory for ops structures\n");
return -1;
}
if (ops == NULL) {
RTE_LOG(ERR, USER1,
- "Can't allocate memory for ops strucures\n");
+ "Can't allocate memory for ops structures\n");
return -1;
}
if (ops == NULL) {
RTE_LOG(ERR, USER1,
- "Can't allocate memory for ops strucures\n");
+ "Can't allocate memory for ops structures\n");
return -1;
}
uint64_t comp_flags = cap->comp_feature_flags;
- /* Huffman enconding */
+ /* Huffman encoding */
if (test_data->huffman_enc == RTE_COMP_HUFFMAN_FIXED &&
(comp_flags & RTE_COMP_FF_HUFFMAN_FIXED) == 0) {
RTE_LOG(ERR, USER1,
* queue, so we never get any failed enqs unless the driver won't accept
* the exact number of descriptors we requested, or the driver won't
* wrap around the end of the TX ring. However, since we're only
- * dequeueing once we've filled up the queue, we have to benchmark it
+ * dequeuing once we've filled up the queue, we have to benchmark it
* piecemeal and then average out the results.
*/
cur_op = 0;
"\t--deq_tmo_nsec : global dequeue timeout\n"
"\t--prod_type_ethdev : use ethernet device as producer.\n"
"\t--prod_type_timerdev : use event timer device as producer.\n"
- "\t expity_nsec would be the timeout\n"
+ "\t expiry_nsec would be the timeout\n"
"\t in ns.\n"
"\t--prod_type_timerdev_burst : use timer device as producer\n"
"\t burst mode.\n"
order_opt_dump(struct evt_options *opt)
{
evt_dump_producer_lcores(opt);
- evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
+ evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
evt_dump_worker_lcores(opt);
evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}
"(if -f is not specified)>]\n"
"[-r <percentage ratio of random ip's to lookup"
"(if -t is not specified)>]\n"
- "[-c <do comarison with LPM library>]\n"
+ "[-c <do comparison with LPM library>]\n"
"[-6 <do tests with ipv6 (default ipv4)>]\n"
"[-s <shuffle randomly generated routes>]\n"
"[-a <check nexthops for all ipv4 address space"
"[-g <number of tbl8's for dir24_8 or trie FIBs>]\n"
"[-w <path to the file to dump routing table>]\n"
"[-u <path to the file to dump ip's for lookup>]\n"
- "[-v <type of loookup function:"
+ "[-v <type of lookup function:"
"\ts1, s2, s3 (3 types of scalar), v (vector) -"
" for DIR24_8 based FIB\n"
"\ts, v - for TRIE based ipv6 FIB>]\n",
#define PORT_ID_DST 1
#define TEID_VALUE 1
-/* Flow items/acctions max size */
+/* Flow items/actions max size */
#define MAX_ITEMS_NUM 32
#define MAX_ACTIONS_NUM 32
#define MAX_ATTRS_NUM 16
* threads time.
*
* Throughput: total count of rte rules divided
- * over the average of the time cosumed by all
+ * over the average of the time consumed by all
* threads time.
*/
double insertion_latency_time;
" Set the option to enable display of RX and TX bursts.\n"
"set port (port_id) vf (vf_id) rx|tx on|off\n"
- " Enable/Disable a VF receive/tranmit from a port\n\n"
+ " Enable/Disable a VF receive/transmit from a port\n\n"
"set port (port_id) vf (vf_id) rxmode (AUPE|ROPE|BAM"
"|MPE) (on|off)\n"
},
[COMMON_POLICY_ID] = {
.name = "{policy_id}",
- .type = "POLCIY_ID",
+ .type = "POLICY_ID",
.help = "policy id",
.call = parse_int,
.comp = comp_none,
},
[TUNNEL_DESTROY] = {
.name = "destroy",
- .help = "destroy tunel",
+ .help = "destroy tunnel",
.next = NEXT(NEXT_ENTRY(TUNNEL_DESTROY_ID),
NEXT_ENTRY(COMMON_PORT_ID)),
.args = ARGS(ARGS_ENTRY(struct buffer, port)),
},
[TUNNEL_DESTROY_ID] = {
.name = "id",
- .help = "tunnel identifier to testroy",
+ .help = "tunnel identifier to destroy",
.next = NEXT(NEXT_ENTRY(COMMON_UNSIGNED)),
.args = ARGS(ARGS_ENTRY(struct tunnel_ops, id)),
.call = parse_tunnel,
[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SHARED_SHAPERS]
= "num shared shapers field (node params)",
[RTE_TM_ERROR_TYPE_NODE_PARAMS_WFQ_WEIGHT_MODE]
- = "wfq weght mode field (node params)",
+ = "wfq weight mode field (node params)",
[RTE_TM_ERROR_TYPE_NODE_PARAMS_N_SP_PRIORITIES]
= "num strict priorities field (node params)",
[RTE_TM_ERROR_TYPE_NODE_PARAMS_CMAN]
cmdline_parse_inst_t cmd_show_port_tm_level_cap = {
.f = cmd_show_port_tm_level_cap_parsed,
.data = NULL,
- .help_str = "Show Port TM Hierarhical level Capabilities",
+ .help_str = "Show port TM hierarchical level capabilities",
.tokens = {
(void *)&cmd_show_port_tm_level_cap_show,
(void *)&cmd_show_port_tm_level_cap_port,
*
* The testpmd command line for this forward engine sets the flags
* TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
- * wether a checksum must be calculated in software or in hardware. The
+ * whether a checksum must be calculated in software or in hardware. The
* IP, UDP, TCP and SCTP flags always concern the inner layer. The
* OUTER_IP is only useful for tunnel packets.
*/
"If the drop-queue doesn't exist, the packet is dropped. "
"By default drop-queue=127.\n");
#ifdef RTE_LIB_LATENCYSTATS
- printf(" --latencystats=N: enable latency and jitter statistcs "
+ printf(" --latencystats=N: enable latency and jitter statistics "
"monitoring on forwarding lcore id N.\n");
#endif
printf(" --disable-crc-strip: disable CRC stripping by hardware.\n");
uint8_t latencystats_enabled;
/*
- * Lcore ID to serive latency statistics.
+ * Lcore ID to service latency statistics.
*/
lcoreid_t latencystats_lcore_id = -1;
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr) +
sizeof(struct rte_udp_hdr)));
- /* updata udp pkt length */
+ /* update UDP packet length */
udp_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_udp_hdr *,
sizeof(struct rte_ether_hdr) +
sizeof(struct rte_ipv4_hdr));
pkt_len = (uint16_t) (pkt_data_len + sizeof(struct rte_udp_hdr));
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_len);
- /* updata ip pkt length and csum */
+ /* update IP packet length and checksum */
ip_hdr = rte_pktmbuf_mtod_offset(pkt, struct rte_ipv4_hdr *,
sizeof(struct rte_ether_hdr));
ip_hdr->hdr_checksum = 0;
* (https://en.wikipedia.org/wiki/Peterson%27s_algorithm)
* for two execution units to make sure that rte_smp_mb() prevents
* store-load reordering to happen.
- * Also when executed on a single lcore could be used as a approxiamate
+ * Also when executed on a single lcore could be used as an approximate
* estimation of number of cycles particular implementation of rte_smp_mb()
* will take.
*/
/*
* Basic functional tests for librte_bpf.
* The main procedure - load eBPF program, execute it and
- * compare restuls with expected values.
+ * compare results with expected values.
*/
struct dummy_offset {
}
/*
- * same as ld_mbuf1, but then trancate the mbuf by 1B,
+ * same as ld_mbuf1, but then truncate the mbuf by 1B,
- * so load of last 4B fail.
+ * so load of last 4B fails.
*/
static void
/*
* Store original operation index in private data,
* since ordering does not have to be maintained,
- * when dequeueing from compressdev, so a comparison
+ * when dequeuing from compressdev, so a comparison
* at the end of the test can be done.
*/
priv_data = (struct priv_op_data *) (ops[i] + 1);
}
/*
- * Function prepare data for hash veryfication test case.
+ * Function prepares data for the hash verification test case.
* Digest is allocated in 4 last bytes in plaintext, pattern.
*/
snow3g_hash_test_vector_setup(&snow3g_test_case_7, &snow3g_hash_data);
fib = rte_fib_create(__func__, SOCKET_ID_ANY, &config);
TEST_FIB_ASSERT(fib != NULL);
- /* Measue add. */
+ /* Measure add. */
begin = rte_rdtsc();
for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
/* Check with the invalid parameters */
if (rte_kni_register_handlers(kni, NULL) == 0) {
- printf("Unexpectedly register successuflly "
+ printf("Unexpectedly register successfully "
"with NULL ops pointer\n");
exit(-1);
}
/**
* Check multiple processes support on
- * registerring/unregisterring handlers.
+ * registering/unregistering handlers.
*/
if (test_kni_register_handler_mp() < 0) {
printf("fail to check multiple process support\n");
#include "test.h"
-/* incrementd in handler, to check it is properly called once per
+/* incremented in handler, to check it is properly called once per
* key/value association */
static unsigned count;
goto fail;
}
count = 0;
- /* call check_handler() for all entries with key="unexistant_key" */
- if (rte_kvargs_process(kvlist, "unexistant_key", check_handler, NULL) < 0) {
+ /* call check_handler() for all entries with key="nonexistent_key" */
+ if (rte_kvargs_process(kvlist, "nonexistent_key", check_handler, NULL) < 0) {
printf("rte_kvargs_process() error\n");
rte_kvargs_free(kvlist);
goto fail;
}
if (count != 0) {
- printf("invalid count value %d after rte_kvargs_process(unexistant_key)\n",
+ printf("invalid count value %d after rte_kvargs_process(nonexistent_key)\n",
count);
rte_kvargs_free(kvlist);
goto fail;
rte_kvargs_free(kvlist);
goto fail;
}
- /* count all entries with key="unexistant_key" */
- count = rte_kvargs_count(kvlist, "unexistant_key");
+ /* count all entries with key="nonexistent_key" */
+ count = rte_kvargs_count(kvlist, "nonexistent_key");
if (count != 0) {
- printf("invalid count value %d after rte_kvargs_count(unexistant_key)\n",
+ printf("invalid count value %d after rte_kvargs_count(nonexistent_key)\n",
count);
rte_kvargs_free(kvlist);
goto fail;
/* call check_handler() on all entries with key="check", it
* should fail as the value is not recognized by the handler */
if (rte_kvargs_process(kvlist, "check", check_handler, NULL) == 0) {
- printf("rte_kvargs_process() is success bu should not\n");
+ printf("rte_kvargs_process() is success but should not\n");
rte_kvargs_free(kvlist);
goto fail;
}
* in previous test_lpm6_routes.h . Because this table has only 1000
* lines, keeping it doesn't make LPM6 test case so large and also
* make the algorithm to generate rule table unnecessary and the
- * algorithm to genertate test input IPv6 and associated expected
+ * algorithm to generate test input IPv6 and associated expected
- * next_hop much simple.
+ * next_hop much simpler.
*/
MAX_MATCH, set_ids_cache);
/*
* For cache mode, keys overwrite when signature same.
- * the mutimatch should work like single match.
+ * the multimatch should work like single match.
*/
TEST_ASSERT(ret_ht == M_MATCH_CNT && ret_vbf == M_MATCH_CNT &&
ret_cache == 1,
}
/*
- * test function for mempool test based on singple consumer and single producer,
+ * test function for mempool test based on single consumer and single producer,
* can run on one lcore only
*/
static int
}
/*
- * it tests the mempool operations based on singple producer and single consumer
+ * it tests the mempool operations based on single producer and single consumer
*/
static int
test_mempool_sp_sc(void)
}
if (mz->len != maxlen) {
- printf("Memzone reserve with 0 size did not return bigest block\n");
+ printf("Memzone reserve with 0 size did not return biggest block\n");
printf("Expected size = %zu, actual size = %zu\n",
maxlen, mz->len);
rte_dump_physmem_layout(stdout);
if (mz->len < minlen || mz->len > maxlen) {
printf("Memzone reserve with 0 size and alignment %u did not return"
- " bigest block\n", align);
+ " biggest block\n", align);
printf("Expected size = %zu-%zu, actual size = %zu\n",
minlen, maxlen, mz->len);
rte_dump_physmem_layout(stdout);
if (mz != memzone1)
return -1;
- printf("test duplcate zone name\n");
+ printf("test duplicate zone name\n");
mz = rte_memzone_reserve(TEST_MEMZONE_NAME("testzone1"), 100,
SOCKET_ID_ANY, 0);
if (mz != NULL)
err = rte_metrics_update_value(RTE_METRICS_GLOBAL, KEY, VALUE);
TEST_ASSERT(err >= 0, "%s, %d", __func__, __LINE__);
- /* Successful Test: Valid port_id otherthan RTE_METRICS_GLOBAL, key
+ /* Successful Test: Valid port_id other than RTE_METRICS_GLOBAL, key
* and value
*/
err = rte_metrics_update_value(9, KEY, VALUE);
return -1;
}
- /* Make a pool for cloned packeets */
+ /* Make a pool for cloned packets */
mp = rte_pktmbuf_pool_create_by_ops("pcapng_test_pool", NUM_PACKETS,
0, 0,
rte_pcapng_mbuf_size(pkt_len),
/* test of exit power management for an invalid lcore */
ret = rte_power_exit(TEST_POWER_LCORE_INVALID);
if (ret == 0) {
- printf("Unpectedly exit power management successfully for "
+ printf("Unexpectedly exit power management successfully for "
"lcore %u\n", TEST_POWER_LCORE_INVALID);
rte_power_unset_env();
return -1;
/*
* rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered
- * the queiscent state.
+ * the quiescent state.
*/
static int
test_rcu_qsbr_synchronize(void)
rte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);
rte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);
- /* Test if the API returns after unregisterng all the threads */
+ /* Test if the API returns after unregistering all the threads */
for (i = 0; i < RTE_MAX_LCORE; i++)
rte_rcu_qsbr_thread_unregister(t[0], i);
rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
}
static struct test_config ovfl_test1_config = {
- .ifname = "queue avergage overflow test interface",
+ .ifname = "queue average overflow test interface",
.msg = "overflow test 1 : use one RED configuration,\n"
" increase average queue size to target level,\n"
- " check maximum number of bits requirte_red to represent avg_s\n\n",
+ " check maximum number of bits required to represent avg_s\n\n",
.htxt = "avg queue size "
"wq_log2 "
"fraction bits "
printf("%i: rte_red_config_init should have failed!\n", __LINE__);
return -1;
}
- /* min_treshold == max_treshold */
+ /* min_threshold == max_threshold */
if (rte_red_config_init(&config, 0, 1, 1, 0) == 0) {
printf("%i: rte_red_config_init should have failed!\n", __LINE__);
return -1;
}
- /* min_treshold > max_treshold */
+ /* min_threshold > max_threshold */
if (rte_red_config_init(&config, 0, 2, 1, 0) == 0) {
printf("%i: rte_red_config_init should have failed!\n", __LINE__);
return -1;
* increases .called counter. Function returns value stored in .ret field
* of the structure.
* In case of some parameters in some functions the expected value is unknown
- * and cannot be detrmined prior to call. Such parameters are stored
+ * and cannot be determined prior to call. Such parameters are stored
* in structure and can be compared or analyzed later in test case code.
*
* Below structures and functions follow the rules just described.
.action = RTE_PIPELINE_ACTION_PORT,
{.port_id = port_out_id[i^1]},
};
- printf("Setting secont table to output to port\n");
+ printf("Setting second table to output to port\n");
/* Add the default action for the table. */
ret = rte_pipeline_table_default_entry_add(p,
/*
* calculate hashes, complements, then adjust keys with
- * complements and recalsulate hashes
+ * complements and recalculate hashes
*/
for (i = 0; i < RTE_DIM(rng_arr); i++) {
for (k = 0; k < 100; k++) {
#! /usr/bin/env python3
-# SPDX-License-Identitifer: BSD-3-Clause
+# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2020 Intel Corporation
import subprocess
# Triggering this rule, which starts a line and ends it
# with a { identifies a versioned section. The section name is
- # the rest of the line with the + and { symbols remvoed.
+ # the rest of the line with the + and { symbols removed.
- # Triggering this rule sets in_sec to 1, which actives the
+ # Triggering this rule sets in_sec to 1, which activates the
# symbol rule below
/^.*{/ {
}
}
- # This rule idenfies the end of a section, and disables the
+ # This rule identifies the end of a section, and disables the
# symbol rule
/.*}/ {in_sec=0}
# Just inform the user of this occurrence, but
# don't flag it as an error
echo -n "INFO: symbol $symname is added but "
- echo -n "patch has insuficient context "
+ echo -n "patch has insufficient context "
echo -n "to determine the section name "
echo -n "please ensure the version is "
echo "EXPERIMENTAL"
v:mID="63"
id="shape63-63"><title
id="title149">Sheet.63</title><desc
- id="desc151">Contanier/App</desc><v:textBlock
+ id="desc151">Container/App</desc><v:textBlock
v:margins="rect(4,4,4,4)" /><v:textRect
height="22.5"
width="90"
a DPDK application to send and receive raw packets through the Kernel.
In order to improve Rx and Tx performance this implementation makes use of
-PACKET_MMAP, which provides a mmap'ed ring buffer, shared between user space
+PACKET_MMAP, which provides a mmapped ring buffer, shared between user space
-and kernel, that's used to send and receive packets. This helps reducing system
+and kernel, that's used to send and receive packets. This helps reduce system
calls and the copies needed between user space and Kernel.
- mlx4_core: hardware driver managing Mellanox ConnectX-3 devices.
- mlx4_en: Ethernet device driver that provides kernel network interfaces.
- - mlx4_ib: InifiniBand device driver.
+ - mlx4_ib: InfiniBand device driver.
- ib_uverbs: user space driver for verbs (entry point for libibverbs).
- **Firmware update**
A timeout value is set in the driver to control the waiting time before
dropping a packet. Once the timer is expired, the delay drop will be
- deactivated for all the Rx queues with this feature enable. To re-activeate
+ deactivated for all the Rx queues with this feature enabled. To re-activate
it, a rearming is needed and it is part of the kernel driver starting from
OFED 5.5.
For the MARK action the last 16 values in the full range are reserved for
internal PMD purposes (to emulate FLAG action). The valid range for the
- MARK action values is 0-0xFFEF for the 16-bit mode and 0-xFFFFEF
+ MARK action values is 0-0xFFEF for the 16-bit mode and 0-0xFFFFEF
for the 24-bit mode, the flows with the MARK action value outside
the specified range will be rejected.
- mlx5_core: hardware driver managing Mellanox
ConnectX-4/ConnectX-5/ConnectX-6/BlueField devices and related Ethernet kernel
network devices.
- - mlx5_ib: InifiniBand device driver.
+ - mlx5_ib: InfiniBand device driver.
- ib_uverbs: user space driver for Verbs (entry point for libibverbs).
- **Firmware update**
for a failed enqueue burst operation and try enqueuing in a whole later.
Similar as enqueue, there are two dequeue functions:
-``rte_cryptodev_raw_dequeue`` for dequeing single operation, and
+``rte_cryptodev_raw_dequeue`` for dequeuing a single operation, and
``rte_cryptodev_raw_dequeue_burst`` for dequeuing a burst of operations (e.g.
all operations in a ``struct rte_crypto_sym_vec`` descriptor). The
``rte_cryptodev_raw_dequeue_burst`` function allows the user to provide callback
- if all buses report RTE_IOVA_PA, then the preferred IOVA mode is RTE_IOVA_PA,
- if all buses report RTE_IOVA_VA, then the preferred IOVA mode is RTE_IOVA_VA,
-- if all buses report RTE_IOVA_DC, no bus expressed a preferrence, then the
+- if all buses report RTE_IOVA_DC, no bus expressed a preference, then the
preferred mode is RTE_IOVA_DC,
- if the buses disagree (at least one wants RTE_IOVA_PA and at least one wants
RTE_IOVA_VA), then the preferred IOVA mode is RTE_IOVA_DC (see below with the
+ rte_ring
rte_ring supports multi-producer enqueue and multi-consumer dequeue.
- However, it is non-preemptive, this has a knock on effect of making rte_mempool non-preemptable.
+ However, it is non-preemptive, which has a knock-on effect of making rte_mempool non-preemptible.
.. note::
height="14.642858"
x="39.285713"
y="287.16254" /></flowRegion><flowPara
- id="flowPara4817">offse</flowPara></flowRoot> <text
+ id="flowPara4817">offset</flowPara></flowRoot> <text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#9cc3e5;fill-opacity:1;stroke:none;stroke-width:0.23616144"
x="74.16684"
height="14.642858"
x="39.285713"
y="287.16254" /></flowRegion><flowPara
- id="flowPara4817">offse</flowPara></flowRoot> <text
+ id="flowPara4817">offset</flowPara></flowRoot> <text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:3.14881921px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;display:inline;fill:#a8d08d;fill-opacity:1;stroke:none;stroke-width:0.23616144"
x="16.351753"
This occurs when a packet queue has reached maximum capacity and cannot store any more packets.
In this situation, all arriving packets are dropped.
-The flow through the dropper is illustrated in :numref:`figure_flow_tru_droppper`.
+The flow through the dropper is illustrated in :numref:`figure_flow_tru_dropper`.
The RED/WRED/PIE algorithm is exercised first and tail drop second.
-.. _figure_flow_tru_droppper:
+.. _figure_flow_tru_dropper:
-.. figure:: img/flow_tru_droppper.*
+.. figure:: img/flow_tru_dropper.*
Flow Through the Dropper
-- ``ttl``: maximum SFF hopes (6 bits).
+- ``ttl``: maximum SFF hops (6 bits).
- ``length``: total length in 4 bytes words (6 bits).
- ``reserved1``: reserved1 bits (4 bits).
-- ``mdtype``: ndicates format of NSH header (4 bits).
+- ``mdtype``: indicates format of NSH header (4 bits).
- ``next_proto``: indicates protocol type of encap data (8 bits).
- ``spi``: service path identifier (3 bytes).
- ``sindex``: service index (1 byte).
To perform data transfer use standard ``rte_rawdev_enqueue_buffers()`` and
``rte_rawdev_dequeue_buffers()`` APIs. Not all messages produce sensible
-responses hence dequeueing is not always necessary.
+responses hence dequeuing is not always necessary.
BPHY CGX/RPM PMD
----------------
Support PCRE back tracking ctrl.
PCRE call outs
- Support PCRE call outes.
+ Support PCRE call outs.
PCRE forward reference
Support Forward reference.
* **igb_uio: Fixed possible mmap failure for Linux >= 4.5.**
- The mmaping of the iomem range of the PCI device fails for kernels that
+ The mmapping of the iomem range of the PCI device fails for kernels that
enabled the ``CONFIG_IO_STRICT_DEVMEM`` option. The error seen by the
- user is as similar to the following::
+ user is similar to the following::
* The ``rte_cryptodev_configure()`` function does not create the session
mempool for the device anymore.
* The ``rte_cryptodev_queue_pair_attach_sym_session()`` and
- ``rte_cryptodev_queue_pair_dettach_sym_session()`` functions require
+ ``rte_cryptodev_queue_pair_detach_sym_session()`` functions require
the new parameter ``device id``.
* Parameters of ``rte_cryptodev_sym_session_create()`` were modified to
accept ``mempool``, instead of ``device id`` and ``rte_crypto_sym_xform``.
value 0.
- Fixes: 40b966a211ab ("ivshmem: library changes for mmaping using ivshmem")
+ Fixes: 40b966a211ab ("ivshmem: library changes for mmapping using ivshmem")
* **ixgbe/base: Fix SFP probing.**
.. literalinclude:: ../../../examples/ip_reassembly/main.c
:language: c
- :start-after: mbufs stored int the gragment table. 8<
- :end-before: >8 End of mbufs stored int the fragmentation table.
+ :start-after: mbufs stored in the fragment table. 8<
+ :end-before: >8 End of mbufs stored in the fragmentation table.
:dedent: 1
Packet Reassembly and Forwarding
.. literalinclude:: ../../../examples/l2fwd-cat/l2fwd-cat.c
:language: c
:start-after: Initialize the Environment Abstraction Layer (EAL). 8<
- :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+ :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
:dedent: 1
The next task is to initialize the PQoS library and configure CAT. The
.. literalinclude:: ../../../examples/server_node_efd/node/node.c
:language: c
:start-after: Packets dequeued from the shared ring. 8<
- :end-before: >8 End of packets dequeueing.
+ :end-before: >8 End of packets dequeuing.
-Finally, note that both processes updates statistics, such as transmitted, received
+Finally, note that both processes update statistics, such as transmitted, received
and dropped packets, which are shown and refreshed by the server app.
.. literalinclude:: ../../../examples/skeleton/basicfwd.c
:language: c
:start-after: Initializion the Environment Abstraction Layer (EAL). 8<
- :end-before: >8 End of initializion the Environment Abstraction Layer (EAL).
+ :end-before: >8 End of initialization the Environment Abstraction Layer (EAL).
:dedent: 1
"resource_id": 10
}}
-To query the available frequences of an lcore, use the query_cpu_freq command.
+To query the available frequencies of an lcore, use the query_cpu_freq command.
Where {core_num} is the lcore to query.
Before using this command, please enable responses via the set_query command on the host.
Indicate tunnel offload rule type
- ``tunnel_set {tunnel_id}``: mark rule as tunnel offload decap_set type.
-- ``tunnel_match {tunnel_id}``: mark rule as tunel offload match type.
+- ``tunnel_match {tunnel_id}``: mark rule as tunnel offload match type.
Matching pattern
^^^^^^^^^^^^^^^^
rte_bbdev_log_debug("DMA response desc %p", desc);
*op = desc->enc_req.op_addr;
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
for (cb_idx = 0; cb_idx < cbs_in_op; ++cb_idx) {
desc = q->ring_addr + ((q->head_free_desc + desc_offset +
cb_idx) & q->sw_ring_wrap_mask);
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
status |= desc_error << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log_debug("DMA response desc %p", desc);
(*op)->turbo_dec.iter_count = (desc->dec_req.iter + 2) >> 1;
/* crc_pass = 0 when decoder fails */
(*op)->status = !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
return 1;
iter_count = RTE_MAX(iter_count, (uint8_t) desc->dec_req.iter);
/* crc_pass = 0 when decoder fails, one fails all */
status |= !(desc->dec_req.crc_pass) << RTE_BBDEV_CRC_ERROR;
- /* Check the decriptor error field, return 1 on error */
+ /* Check the descriptor error field, return 1 on error */
desc_error = check_desc_error(desc->enc_req.error);
status |= desc_error << RTE_BBDEV_DATA_ERROR;
rte_bbdev_log_debug("DMA response desc %p", desc);
uint16_t queues_num; /*< Null BBDEV queues number */
};
-/* Accecptable params for null BBDEV devices */
+/* Acceptable params for null BBDEV devices */
#define BBDEV_NULL_MAX_NB_QUEUES_ARG "max_nb_queues"
#define BBDEV_NULL_SOCKET_ID_ARG "socket_id"
uint16_t queues_num; /*< Turbo SW device queues number */
};
-/* Accecptable params for Turbo SW devices */
+/* Acceptable params for Turbo SW devices */
#define TURBO_SW_MAX_NB_QUEUES_ARG "max_nb_queues"
#define TURBO_SW_SOCKET_ID_ARG "socket_id"
{
int comp = 0;
- /* Segragating ETH from SEC devices */
+ /* Segregating ETH from SEC devices */
if (dev1->device_type > dev2->device_type)
comp = 1;
else if (dev1->device_type < dev2->device_type)
int qman_irqsource_add(u32 bits);
/**
- * qman_fq_portal_irqsource_add - samilar to qman_irqsource_add, but it
+ * qman_fq_portal_irqsource_add - similar to qman_irqsource_add, but it
* takes portal (fq specific) as input rather than using the thread affined
* portal.
*/
struct qm_dqrr_entry *qman_dequeue(struct qman_fq *fq);
/**
- * qman_dqrr_consume - Consume the DQRR entriy after volatile dequeue
+ * qman_dqrr_consume - Consume the DQRR entry after volatile dequeue
* @fq: Frame Queue on which the volatile dequeue command is issued
* @dq: DQRR entry to consume. This is the one which is provided by the
* 'qbman_dequeue' command.
* @cgr: the 'cgr' object to deregister
*
* "Unplugs" this CGR object from the portal affine to the cpu on which this API
- * is executed. This must be excuted on the same affine portal on which it was
+ * is executed. This must be executed on the same affine portal on which it was
* created.
*/
__rte_internal
/* Specifies the stash request queue this portal should use */
uint8_t sdest;
- /* Specifes a specific portal index to map or QBMAN_ANY_PORTAL_IDX
+ /* Specifies a specific portal index to map or QBMAN_ANY_PORTAL_IDX
* for don't care. The portal index will be populated by the
* driver when the ioctl() successfully completes.
*/
struct dpaa_ioctl_portal_map {
/* Input parameter, is a qman or bman portal required. */
enum dpaa_portal_type type;
- /* Specifes a specific portal index to map or 0xffffffff
+ /* Specifies a specific portal index to map or 0xffffffff
* for don't care.
*/
uint32_t index;
fslmc_bus = driver->fslmc_bus;
- /* Cleanup the PA->VA Translation table; From whereever this function
+ /* Cleanup the PA->VA Translation table; From wherever this function
* is called from.
*/
if (rte_eal_iova_mode() == RTE_IOVA_PA)
dpio_epoll_fd = epoll_create(1);
ret = rte_dpaa2_intr_enable(dpio_dev->intr_handle, 0);
if (ret) {
- DPAA2_BUS_ERR("Interrupt registeration failed");
+ DPAA2_BUS_ERR("Interrupt registration failed");
return -1;
}
struct rte_cryptodev_data *crypto_data;
};
uint32_t fqid; /*!< Unique ID of this queue */
- uint16_t flow_id; /*!< To be used by DPAA2 frmework */
+ uint16_t flow_id; /*!< To be used by DPAA2 framework */
uint8_t tc_index; /*!< traffic class identifier */
uint8_t cgid; /*! < Congestion Group id for this queue */
uint64_t rx_pkts;
struct qbman_result *dq);
/**
- * qbman_check_command_complete() - Check if the previous issued dq commnd
+ * qbman_check_command_complete() - Check if the previously issued dq command
* is completed and results are available in memory.
* @s: the software portal object.
* @dq: the dequeue result read from the memory.
/**
* qbman_result_DQ_odpid() - Get the seqnum field in dequeue response
- * odpid is valid only if ODPVAILD flag is TRUE.
+ * odpid is valid only if ODPVALID flag is TRUE.
* @dq: the dequeue result.
*
* Return odpid.
* qbman_result_SCN_state() - Get the state field in State-change notification
* @scn: the state change notification.
*
- * Return the state in the notifiation.
+ * Return the state in the notification.
*/
__rte_internal
uint8_t qbman_result_SCN_state(const struct qbman_result *scn);
/* Parsing CGCU */
/**
- * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid
+ * qbman_result_cgcu_cgid() - Check CGCU resource id, i.e. cgid
* @scn: the state change notification.
*
* Return the CGCU resource id.
__rte_internal
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
/**
- * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor
+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
* @d: the enqueue descriptor.
* @response_success: 1 = enqueue with response always; 0 = enqueue with
* rejections returned on a FQ.
* @opr_id: the order point record id.
* @seqnum: the order restoration sequence number.
- * @incomplete: indiates whether this is the last fragments using the same
- * sequeue number.
+ * @incomplete: indicates whether this is the last fragment using the same
+ * sequence number.
*/
__rte_internal
void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
uint8_t qbman_result_eqresp_rspid(struct qbman_result *eqresp);
/**
- * qbman_result_eqresp_rc() - determines if enqueue command is sucessful.
+ * qbman_result_eqresp_rc() - determines if enqueue command is successful.
* @eqresp: enqueue response.
*
- * Return 0 when command is sucessful.
+ * Return 0 when command is successful.
*/
__rte_internal
uint8_t qbman_result_eqresp_rc(struct qbman_result *eqresp);
/**
* These functions change the FQ flow-control stuff between XON/XOFF. (The
* default is XON.) This setting doesn't affect enqueues to the FQ, just
- * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when
+ * dequeues. XOFF FQs will remain in the tentatively-scheduled state, even when
* non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is
* changed to XOFF after it had already become truly-scheduled to a channel, and
* a pull dequeue of that channel occurs that selects that FQ for dequeuing,
continue;
}
- /* skip non-mmapable BARs */
+ /* skip non-mmappable BARs */
if ((reg->flags & VFIO_REGION_INFO_FLAG_MMAP) == 0) {
free(reg);
continue;
int rte_vdev_init(const char *name, const char *args);
/**
- * Uninitalize a driver specified by name.
+ * Uninitialize a driver specified by name.
*
* @param name
* The pointer to a driver name to be uninitialized.
/*
* If device class GUID matches, call the probe function of
- * registere drivers for the vmbus device.
+ * registered drivers for the vmbus device.
* Return -1 if initialization failed,
* and 1 if no driver found for this device.
*/
#define CGX_CMRX_INT_OVERFLW BIT_ULL(1)
/*
- * CN10K stores number of lmacs in 4 bit filed
- * in contraty to CN9K which uses only 3 bits.
+ * CN10K stores the number of lmacs in a 4-bit field,
+ * in contrast to CN9K which uses only 3 bits.
*
* In theory masks should differ yet on CN9K
* bits beyond specified range contain zeros.
{
plt_dump("W0: cir_mantissa \t\t\t%d\nW0: pebs_mantissa \t\t\t0x%03x",
bpf->cir_mantissa, bpf->pebs_mantissa);
- plt_dump("W0: peir_matissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
+ plt_dump("W0: peir_mantissa \t\t\t\t%d\nW0: cbs_exponent \t\t\t%d",
bpf->peir_mantissa, bpf->cbs_exponent);
plt_dump("W0: cir_exponent \t\t\t%d\nW0: pebs_exponent \t\t\t%d",
bpf->cir_exponent, bpf->pebs_exponent);
if (profile->peak.rate && min_rate > profile->peak.rate)
min_rate = profile->peak.rate;
- /* Each packet accomulate single count, whereas HW
+ /* Each packet accumulates a single count, whereas HW
* considers each unit as Byte, so we need convert
* user pps to bps
*/
/* Ethtype: Offset 12B, len 2B */
kex_cap.bit.ethtype_0 = npc_is_kex_enabled(
npc, NPC_LID_LA, NPC_LT_LA_ETHER, 12 * 8, 2 * 8);
- /* QINQ VLAN Ethtype: ofset 8B, len 2B */
+ /* QINQ VLAN Ethtype: offset 8B, len 2B */
kex_cap.bit.ethtype_x = npc_is_kex_enabled(
npc, NPC_LID_LB, NPC_LT_LB_STAG_QINQ, 8 * 8, 2 * 8);
/* VLAN ID0 : Outer VLAN: Offset 2B, len 2B */
uint32_t rss_grps; /* rss groups supported */
uint16_t flow_prealloc_size; /* Pre allocated mcam size */
uint16_t flow_max_priority; /* Max priority for flow */
- uint16_t switch_header_type; /* Suppprted switch header type */
+ uint16_t switch_header_type; /* Supported switch header type */
uint32_t mark_actions; /* Number of mark actions */
uint32_t vtag_strip_actions; /* vtag insert/strip actions */
uint16_t pf_func; /* pf_func of device */
if (cpt_ctx->fc_type == FC_GEN) {
/*
* We need to always say IV is from DPTR as user can
- * sometimes iverride IV per operation.
+ * sometimes override IV per operation.
*/
fctx->enc.iv_source = CPT_FROM_DPTR;
tailroom = rte_pktmbuf_tailroom(pkt);
if (likely((headroom >= 24) &&
(tailroom >= 8))) {
- /* In 83XX this is prerequivisit for Direct mode */
+ /* In 83XX this is a prerequisite for Direct mode */
*flags |= SINGLE_BUF_HEADTAILROOM;
}
param->bufs[0].vaddr = seg_data;
* Set dlen = sum(sizeof(fpm address), ROUNDUP8(message len),
* ROUNDUP8(sign len(r and s), public key len(x and y coordinates),
* prime len, order len)).
- * Please note sign, public key and order can not excede prime length
+ * Please note sign, public key and order can not exceed prime length
* i.e. 6 * p_align
*/
dlen = sizeof(fpm_table_iova) + m_align + (8 * p_align);
* @authlen: size of digest
*
* The IV prepended before hmac payload must be 8 bytes consisting
- * of COUNT||BEAERER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
+ * of COUNT||BEARER||DIR. The COUNT is of 32-bits, bearer is of 5 bits and
* direction is of 1 bit - totalling to 38 bits.
*
* Return: size of descriptor written in words or negative number on error
/* Set the variable size of data the register will write */
if (dir == OP_TYPE_ENCAP_PROTOCOL) {
- /* We will add the interity data so add its length */
+ /* We will add the integrity data so add its length */
MATHI(p, SEQINSZ, ADD, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
} else {
- /* We will check the interity data so remove its length */
+ /* We will check the integrity data so remove its length */
MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQOUTSZ, 4, IMMED2);
/* Do not take the ICV in the out-snooping configuration */
MATHI(p, SEQINSZ, SUB, PDCP_MAC_I_LEN, VSEQINSZ, 4, IMMED2);
CLRW_CLR_C1MODE,
CLRW, 0, 4, IMMED);
- /* Load the key for authentcation */
+ /* Load the key for authentication */
KEY(p, KEY1, authdata->key_enc_flags, authdata->key,
authdata->keylen, INLINE_KEY(authdata));
rte_free(dpaax_iova_table_p->entries);
dpaax_iova_table_p = NULL;
- DPAAX_DEBUG("IOVA Table cleanedup");
+ DPAAX_DEBUG("IOVA Table cleaned");
}
int
u8 data[12];
};
-/* Generic AQ section in proflie */
+/* Generic AQ section in profile */
struct iavf_profile_aq_section {
u16 opcode;
u16 flags;
case VIRTCHNL_OP_DCF_CMD_DESC:
return "VIRTCHNL_OP_DCF_CMD_DESC";
case VIRTCHNL_OP_DCF_CMD_BUFF:
- return "VIRTCHHNL_OP_DCF_CMD_BUFF";
+ return "VIRTCHNL_OP_DCF_CMD_BUFF";
case VIRTCHNL_OP_DCF_DISABLE:
return "VIRTCHNL_OP_DCF_DISABLE";
case VIRTCHNL_OP_DCF_GET_VSI_MAP:
static bool mlx5_common_initialized;
/**
- * One time innitialization routine for run-time dependency on glue library
+ * One time initialization routine for run-time dependency on glue library
* for multiple PMDs. Each mlx5 PMD that depends on mlx5_common module,
* must invoke in its constructor.
*/
* Destroy a mempool registration object.
*
* @param standalone
- * Whether @p mpr owns its MRs excludively, i.e. they are not shared.
+ * Whether @p mpr owns its MRs exclusively, i.e. they are not shared.
*/
static void
mlx5_mempool_reg_destroy(struct mlx5_mr_share_cache *share_cache,
* Pointer to file stream.
*
* @return
- * 0 on success, a nagative value otherwise.
+ * 0 on success, a negative value otherwise.
*/
int
mlx5_devx_cmd_flow_dump(void *fdb_domain __rte_unused,
* Check if the address belongs to memory seg list.
*
* @param addr
- * Memory address to be ckeced.
+ * Memory address to be checked.
* @param msl
* Memory seg list.
*
* Check if the address belongs to rte memory.
*
* @param addr
- * Memory address to be ckeced.
+ * Memory address to be checked.
*
* @return
* True if it belongs, false otherwise.
enum mlx5_mem_flags {
MLX5_MEM_ANY = 0,
- /* Memory will be allocated dpends on sys_mem_en. */
+ /* Memory will be allocated depending on sys_mem_en. */
MLX5_MEM_SYS = 1 << 0,
/* Memory should be allocated from system. */
MLX5_MEM_RTE = 1 << 1,
* timestamp format supported by the queue.
*
* @return
- * Converted timstamp format settings.
+ * Converted timestamp format settings.
*/
static inline uint32_t
mlx5_ts_format_conv(uint32_t ts_format)
}
/**
- * Register mr. Given protection doamin pointer, pointer to addr and length
+ * Register mr. Given protection domain pointer, pointer to addr and length
* register the memory region.
*
* @param[in] pd
* @param[in] addr
* Pointer to memory start address (type devx_device_ctx).
* @param[in] length
- * Lengtoh of the memory to register.
+ * Length of the memory to register.
* @param[out] pmd_mr
* pmd_mr struct set with lkey, address, length, pointer to mr object, mkey
*
/**
* This API allocates aligned or non-aligned memory. The free can be on either
* aligned or nonaligned memory. To be protected - even though there may be no
- * alignment - in Windows this API will unconditioanlly call _aligned_malloc()
+ * alignment - in Windows this API will unconditionally call _aligned_malloc()
* with at least a minimal alignment size.
*
* @param[in] align
#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)
-/* Minimum ring bufer size for memory allocation */
+/* Minimum ring buffer size for memory allocation */
#define ADF_RING_SIZE_BYTES_MIN(SIZE) ((SIZE < ADF_RING_SIZE_4K) ? \
ADF_RING_SIZE_4K : SIZE)
#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) ((void)0)
-/* Just avoid store and compiler (impliciltly) reordering */
+/* Just avoid store and compiler (implicitly) reordering */
#define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) rte_wmb()
/* TIMESTAMP */
uint64_t bf : 1;
/** Comp/decomp operation */
uint64_t op : 2;
- /** Data sactter */
+ /** Data scatter */
uint64_t ds : 1;
/** Data gather */
uint64_t dg : 1;
uint64_t bf : 1;
/** Comp/decomp operation */
uint64_t op : 2;
- /** Data sactter */
+ /** Data scatter */
uint64_t ds : 1;
/** Data gather */
uint64_t dg : 1;
/**< PCI device id of ZIP VF */
#define PCI_DEVICE_ID_OCTEONTX_ZIPVF 0xA037
-/* maxmum number of zip vf devices */
+/* maximum number of zip vf devices */
#define ZIP_MAX_VFS 8
/* max size of one chunk */
} else if (info.error) {
rte_mempool_obj_iter(mp, qat_comp_stream_destroy, NULL);
QAT_LOG(ERR,
- "Destoying mempool %s as at least one element failed initialisation",
+ "Destroying mempool %s as at least one element failed initialisation",
stream_pool_name);
rte_mempool_free(mp);
mp = NULL;
BCMFS_UNKNOWN
};
-/* A table to store registered queue pair opertations */
+/* A table to store registered queue pair operations */
struct bcmfs_hw_queue_pair_ops_table {
rte_spinlock_t tl;
/* Number of used ops structs in the table. */
nb_descriptors = FS_RM_MAX_REQS;
if (qp_conf->iobase == NULL) {
- BCMFS_LOG(ERR, "IO onfig space null");
+ BCMFS_LOG(ERR, "IO config space null");
return -EINVAL;
}
/** Crypto Request processing successful. */
#define BCMFS_SYM_RESPONSE_SUCCESS (0)
-/** Crypot Request processing protocol failure. */
+/** Crypto Request processing protocol failure. */
#define BCMFS_SYM_RESPONSE_PROTO_FAILURE (1)
-/** Crypot Request processing completion failure. */
+/** Crypto Request processing completion failure. */
#define BCMFS_SYM_RESPONSE_COMPL_ERROR (2)
-/** Crypot Request processing hash tag check error. */
+/** Crypto Request processing hash tag check error. */
#define BCMFS_SYM_RESPONSE_HASH_TAG_ERROR (3)
/** Maximum threshold length to adjust AAD in continuation
#include "bcmfs_sym_defs.h"
#include "bcmfs_sym_req.h"
-/* structure to hold element's arrtibutes */
+/* structure to hold element's attributes */
struct fsattr {
void *va;
uint64_t pa;
{
struct bcmfs_queue *txq = &qp->tx_q;
- /* sync in bfeore ringing the door-bell */
+ /* sync in before ringing the door-bell */
rte_wmb();
FS_MMIO_WRITE32(txq->descs_inflight,
void *register_base_addr; /* Base address for SEC's
* register memory for this job ring.
*/
- uint8_t coalescing_en; /* notifies if coelescing is
+ uint8_t coalescing_en; /* notifies if coalescing is
* enabled for the job ring
*/
sec_job_ring_state_t jr_state; /* The state of this job ring */
/* @brief Set interrupt coalescing parameters on the Job Ring.
* @param [in] job_ring The job ring
- * @param [in] irq_coalesing_timer Interrupt coalescing timer threshold.
+ * @param [in] irq_coalescing_timer Interrupt coalescing timer threshold.
* This value determines the maximum
* amount of time after processing a
* descriptor before raising an interrupt.
/* Structure encompassing a job descriptor which is to be processed
* by SEC. User should also initialise this structure with the callback
- * function pointer which will be called by driver after recieving proccessed
+ * function pointer which will be called by driver after receiving processed
* descriptor from SEC. User data is also passed in this data structure which
* will be sent as an argument to the user callback function.
*/
* value that indicates an IRQ disable action into UIO file descriptor
* of this job ring.
*
- * @param [in] uio_fd UIO File descripto
+ * @param [in] uio_fd UIO File descriptor
* @retval 0 for success
* @retval -1 value for error
*
* value that indicates an IRQ disable action into UIO file descriptor
* of this job ring.
*
- * @param [in] uio_fd UIO File descripto
+ * @param [in] uio_fd UIO File descriptor
* @retval 0 for success
* @retval -1 value for error
*
case CCP_AUTH_ALGO_SHA512_HMAC:
/**
* 1. Load PHash1 = H(k ^ ipad); to LSB
- * 2. generate IHash = H(hash on meassage with PHash1
+ * 2. generate IHash = H(hash on message with PHash1
* as init values);
* 3. Retrieve IHash 2 slots for 384/512
* 4. Load Phash2 = H(k ^ opad); to LSB
/* Maximum length for digest */
#define DIGEST_LENGTH_MAX 64
-/* SHA LSB intialiazation values */
+/* SHA LSB initialization values */
#define SHA1_H0 0x67452301UL
#define SHA1_H1 0xefcdab89UL
#include <rte_crypto_sym.h>
#include <cryptodev_pmd.h>
-/**< CCP sspecific */
+/**< CCP specific */
#define MAX_HW_QUEUES 5
#define CCP_MAX_TRNG_RETRIES 10
#define CCP_ALIGN(x, y) ((((x) + (y - 1)) / y) * y)
}
ops[pkts++] = op;
- /* report op status to sym->op and then free the ctx memeory */
+ /* report op status to sym->op and then free the ctx memory */
rte_mempool_put(ctx->ctx_pool, (void *)ctx);
qman_dqrr_consume(fq, dq);
/* CPT VF device initialization */
otx_cpt_vfvq_init(cptvf);
- /* Send msg to PF to assign currnet Q to required group */
+ /* Send msg to PF to assign current Q to required group */
cptvf->vfgrp = group;
err = otx_cpt_send_vf_grp_msg(cptvf, group);
if (err) {
otx_cpt_handle_mbox_intr(struct cpt_vf *cptvf);
/*
- * Checks if VF is able to comminicate with PF
+ * Checks if VF is able to communicate with PF
* and also gets the CPT number this VF is associated to.
*/
int
&mdata, (void **)&prep_req);
if (unlikely(ret)) {
- CPT_LOG_DP_ERR("prep cryto req : op %p, cpt_op 0x%x "
+ CPT_LOG_DP_ERR("prep crypto req : op %p, cpt_op 0x%x "
"ret 0x%x", op, (unsigned int)cpt_op, ret);
return NULL;
}
static int qat_asym_check_nonzero(rte_crypto_param n)
{
if (n.length < 8) {
- /* Not a case for any cryptograpic function except for DH
+ /* Not a case for any cryptographic function except for DH
* generator which very often can be of one byte length
*/
size_t i;
ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC) {
/* In case of AES-CCM this may point to user selected
- * memory or iv offset in cypto_op
+ * memory or iv offset in crypto_op
*/
uint8_t *aad_data = op->sym->aead.aad.data;
/* This is true AAD length, it not includes 18 bytes of
{
/*
* Ensure updated avail->idx is visible to host.
- * For virtio on IA, the notificaiton is through io port operation
+ * For virtio on IA, the notification is through io port operation
* which is a serialization instruction itself.
*/
VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
struct rte_ring *completed;
uint16_t i;
- desc = rte_zmalloc_socket("dma_skelteon_desc",
+ desc = rte_zmalloc_socket("dma_skeleton_desc",
nb_desc * sizeof(struct skeldma_desc),
RTE_CACHE_LINE_SIZE, hw->socket_id);
if (desc == NULL) {
struct rte_event_dev_info info;
int i, ret;
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(
pool_name, MAX_EVENTS, 0, 0, 512, rte_socket_id());
if (!eventdev_test_mempool) {
cn9k_sso_set_rsrc(dev);
if (cnxk_sso_testsuite_run(dev_name))
return rc;
- /* Verift dual ws mode. */
+ /* Verify dual ws mode. */
printf("Verifying CN9K Dual workslot mode\n");
dev->dual_ws = 1;
cn9k_sso_set_rsrc(dev);
}
/* This is expected with eventdev API!
- * It blindly attemmpts to unmap all queues.
+ * It blindly attempts to unmap all queues.
*/
if (i == DLB2_MAX_NUM_QIDS_PER_LDB_CQ) {
DLB2_LOG_DBG("dlb2: ignoring LB QID %d not mapped for qm_port %d.\n",
bool setup_done;
/* enq_configured is set when the qm port is created */
bool enq_configured;
- uint8_t implicit_release; /* release events before dequeueing */
+ uint8_t implicit_release; /* release events before dequeuing */
} __rte_cache_aligned;
struct dlb2_queue {
0,
RTE_EVENT_PORT_ATTR_DEQ_DEPTH,
&dequeue_depth)) {
- printf("%d: Error retrieveing dequeue depth\n", __LINE__);
+ printf("%d: Error retrieving dequeue depth\n", __LINE__);
goto err;
}
* Selects the token pop mode for a DLB2 port.
*/
enum dlb2_token_pop_mode {
- /* Pop the CQ tokens immediately after dequeueing. */
+ /* Pop the CQ tokens immediately after dequeuing. */
AUTO_POP,
/* Pop CQ tokens after (dequeue_depth - 1) events are released.
* Supported on load-balanced ports only.
struct rte_event_dev_info info;
const char *pool_name = "evdev_dpaa2_test_pool";
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
MAX_EVENTS,
0 /*MBUF_CACHE_SIZE*/,
/* Multiple 24-bit flow ids will map to the same DSW-level flow. The
- * number of DSW flows should be high enough make it unlikely that
+ * number of DSW flows should be high enough to make it unlikely that
* flow ids of several large flows hash to the same DSW-level flow.
- * Such collisions will limit parallism and thus the number of cores
+ * Such collisions will limit parallelism and thus the number of cores
* that may be utilized. However, configuring a large number of DSW
* flows might potentially, depending on traffic and actual
* application flow id value range, result in each such DSW-level flow
/* Only one outstanding migration per port is allowed */
#define DSW_MAX_PAUSED_FLOWS (DSW_MAX_PORTS*DSW_MAX_FLOWS_PER_MIGRATION)
-/* Enough room for paus request/confirm and unpaus request/confirm for
+/* Enough room for pause request/confirm and unpause request/confirm for
* all possible senders.
*/
#define DSW_CTL_IN_RING_SIZE ((DSW_MAX_PORTS-1)*4)
static void
dsw_port_note_op(struct dsw_port *port, uint16_t num_events)
{
- /* To pull the control ring reasonbly often on busy ports,
+ /* To pull the control ring reasonably often on busy ports,
* each dequeued/enqueued event is considered an 'op' too.
*/
port->ops_since_bg_task += (num_events+1);
* addition, a port cannot be left "unattended" (e.g. unused)
* for long periods of time, since that would stall
* migration. Eventdev API extensions to provide a cleaner way
- * to archieve both of these functions should be
+ * to achieve both of these functions should be
* considered.
*/
if (unlikely(events_len == 0)) {
/*
* In Cavium OCTEON TX SoC, all accesses to the device registers are
- * implictly strongly ordered. So, The relaxed version of IO operation is
- * safe to use with out any IO memory barriers.
+ * implicitly strongly ordered. So, the relaxed version of IO operation is
+ * safe to use without any IO memory barriers.
*/
#define ssovf_read64 rte_read64_relaxed
struct rte_event_dev_info info;
const char *pool_name = "evdev_octeontx_test_pool";
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name,
MAX_EVENTS,
0 /*MBUF_CACHE_SIZE*/,
struct rte_event_dev_info info;
int i, ret;
- /* Create and destrory pool for each test case to make it standalone */
+ /* Create and destroy pool for each test case to make it standalone */
eventdev_test_mempool = rte_pktmbuf_pool_create(pool_name, MAX_EVENTS,
0, 0, 512,
rte_socket_id());
event.flow_id, flags, lookup_mem);
/* Extracting tstamp, if PTP enabled. CGX will prepend
* the timestamp at starting of packet data and it can
- * be derieved from WQE 9 dword which corresponds to SG
+ * be derived from WQE 9 dword which corresponds to SG
* iova.
* rte_pktmbuf_mtod_offset can be used for this purpose
* but it brings down the performance as it reads
}
PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
- "Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]"
+ "Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]"
" , self_test:[%s]\n",
dev->data->dev_id,
name,
}
- /* Start the devicea */
+ /* Start the device */
if (!err) {
if (rte_event_dev_start(evdev) < 0) {
PMD_DRV_LOG(ERR, "%s:%d: Error with start call\n",
uint16_t outstanding_releases __rte_cache_aligned;
uint16_t inflight_max; /* app requested max inflights for this port */
uint16_t inflight_credits; /* num credits this port has right now */
- uint8_t implicit_release; /* release events before dequeueing */
+ uint8_t implicit_release; /* release events before dequeuing */
uint16_t last_dequeue_burst_sz; /* how big the burst was */
uint64_t last_dequeue_ticks; /* used to track burst processing time */
NULL,
0);
- /* Verify that the resetable stats are reset, and others are not */
+ /* Verify that the resettable stats are reset, and others are not */
static const uint64_t queue_expected_zero[] = {
0 /* rx */,
0 /* tx */,
}
/* assigning mbuf from the acquired objects */
for (i = 0; (i < ret) && bufs[i].addr; i++) {
- /* TODO-errata - objerved that bufs may be null
+ /* TODO-errata - observed that bufs may be null
* i.e. first buffer is valid, remaining 6 buffers
* may be null.
*/
break;
}
- /* Imsert it into an ordered linked list */
+ /* Insert it into an ordered linked list */
for (curr = &head; curr[0] != NULL; curr = curr[0]) {
if ((uintptr_t)node <= (uintptr_t)curr[0])
break;
ret = octeontx_fpapf_aura_detach(gpool);
if (ret) {
- fpavf_log_err("Failed to dettach gaura %u. error code=%d\n",
+ fpavf_log_err("Failed to detach gaura %u. error code=%d\n",
gpool, ret);
}
typedef void (*rx_user_meta_hook_fn)(struct rte_mbuf *mbuf,
const uint32_t *meta,
void *ext_user_data);
-/* TX hook poplulate *meta, with up to 20 bytes. meta_cnt
+/* TX hook populates *meta with up to 20 bytes. meta_cnt
* returns the number of uint32_t words populated, 0 to 5
*/
typedef void (*tx_user_meta_hook_fn)(const struct rte_mbuf *mbuf,
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U);
hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U);
- /* VLAN proimisc bu defauld */
+ /* VLAN promisc by default */
hw_atl_rpf_vlan_prom_mode_en_set(self, 1);
/* Rx Interrupts */
return 0;
}
-/*Distrubting fifo size */
+/* Distributing FIFO size */
static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
{
unsigned int fifo_size;
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
unsigned int kr_redrv;
- /* Auto-negotiation atate machine support */
+ /* Auto-negotiation state machine support */
unsigned int an_int;
unsigned int an_status;
enum axgbe_an an_result;
retry = 1;
again2:
- /* Read the specfied register */
+ /* Read the specified register */
i2c_op.cmd = AXGBE_I2C_CMD_READ;
i2c_op.target = target;
i2c_op.len = val_len;
{
return 0;
/* Dummy API since there is no case to support
- * external phy devices registred through kerenl apis
+ * external phy devices registered through kernel APIs
*/
}
#include <rte_mempool.h>
#include <rte_mbuf.h>
-/* Useful to avoid shifting for every descriptor prepration*/
+/* Useful to avoid shifting for every descriptor preparation */
#define TX_DESC_CTRL_FLAGS 0xb000000000000000
#define TX_DESC_CTRL_FLAG_TMST 0x40000000
#define TX_FREE_BULK 8
* block.
*
* RAMROD_CMD_ID_ETH_UPDATE
- * Used to update the state of the leading connection, usually to udpate
+ * Used to update the state of the leading connection, usually to update
* the RSS indirection table. Completes on the RCQ of the leading
* connection. (Not currently used under FreeBSD until OS support becomes
* available.)
* the RCQ of the leading connection.
*
* RAMROD_CMD_ID_ETH_CFC_DEL
- * Used when tearing down a conneciton prior to driver unload. Completes
+ * Used when tearing down a connection prior to driver unload. Completes
* on the RCQ of the leading connection (since the current connection
* has been completely removed from controller memory).
*
/*
* It's ok if the actual decrement is issued towards the memory
- * somewhere between the lock and unlock. Thus no more explict
+ * somewhere between the lock and unlock. Thus no more explicit
* memory barrier is needed.
*/
if (common) {
break;
case (RAMROD_CMD_ID_ETH_TERMINATE):
- PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
drv_cmd = ECORE_Q_CMD_TERMINATE;
break;
case BNX2X_RX_MODE_ALLMULTI_PROMISC:
case BNX2X_RX_MODE_PROMISC:
/*
- * According to deffinition of SI mode, iface in promisc mode
+ * According to definition of SI mode, iface in promisc mode
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
/*
* Cleans the object that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
*/
static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
{
/*
* Nothing to do during unload if previous bnx2x_nic_load()
- * did not completed successfully - all resourses are released.
+ * did not complete successfully - all resources are released.
*/
if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
return 0;
/*
* Prevent transactions to host from the functions on the
* engine that doesn't reset global blocks in case of global
- * attention once gloabl blocks are reset and gates are opened
+ * attention once global blocks are reset and gates are opened
* (the engine which leader will perform the recovery
* last).
*/
/*
* At this stage no more interrupts will arrive so we may safely clean
- * the queue'able objects here in case they failed to get cleaned so far.
+ * the queueable objects here in case they failed to get cleaned so far.
*/
if (IS_PF(sc)) {
bnx2x_squeeze_objects(sc);
}
/*
- * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
+ * Encapsulates an mbuf cluster into the Tx BD chain and makes the memory
* visible to the controller.
*
* If an mbuf is submitted to this routine and cannot be given to the
return val1 != 0;
}
-/* send load requrest to mcp and analyze response */
+/* send load request to MCP and analyze response */
static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
{
PMD_INIT_FUNC_TRACE(sc);
* sum of vn_min_rates.
* or
* 0 - if all the min_rates are 0.
- * In the later case fainess algorithm should be deactivated.
+ * In the latter case the fairness algorithm should be deactivated.
* If all min rates are not zero then those that are zeroes will be set to 1.
*/
static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input)
txq_init->fw_sb_id = fp->fw_sb_id;
/*
- * set the TSS leading client id for TX classfication to the
+ * set the TSS leading client id for Tx classification to the
* leading RSS client id
*/
txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id);
}
/*
-* Walk the PCI capabiites list for the device to find what features are
-* supported. These capabilites may be enabled/disabled by firmware so it's
+* Walk the PCI capabilities list for the device to find what features are
+* supported. These capabilities may be enabled/disabled by firmware so it's
* best to walk the list rather than make assumptions.
*/
static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
} else {
sc->devinfo.int_block = INT_BLOCK_IGU;
-/* do not allow device reset during IGU info preocessing */
+/* do not allow device reset during IGU info processing */
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
- /* get PCI capabilites */
+ /* get PCI capabilities */
bnx2x_probe_pci_caps(sc);
if (sc->devinfo.pcie_msix_cap_reg != 0) {
* stay set)
* f. If this is VNIC 3 of a port then also init
* first_timers_ilt_entry to zero and last_timers_ilt_entry
- * to the last enrty in the ILT.
+ * to the last entry in the ILT.
*
* Notes:
* Currently the PF error in the PGLC is non recoverable.
/**
* bnx2x_pf_flr_clnup
* a. re-enable target read on the PF
- * b. poll cfc per function usgae counter
+ * b. poll cfc per function usage counter
- * c. poll the qm perfunction usage counter
+ * c. poll the qm per function usage counter
* d. poll the tm per function usage counter
* e. poll the tm per function scan-done indication
}; /* struct bnx2x_slowpath */
/*
- * Port specifc data structure.
+ * Port specific data structure.
*/
struct bnx2x_port {
/*
* Port Management Function (for 57711E only).
* When this field is set the driver instance is
- * responsible for managing port specifc
+ * responsible for managing port specific
* configurations such as handling link attentions.
*/
uint32_t pmf;
/*
* MCP scratchpad address for port specific statistics.
- * The device is responsible for writing statistcss
+ * The device is responsible for writing statistics
* back to the MCP for use with management firmware such
* as UMP/NC-SI.
*/
* already registered for this port (which means that the user wants storage
* services).
* 2. During cnic-related load, to know if offload mode is already configured
- * in the HW or needs to be configrued. Since the transition from nic-mode to
- * offload-mode in HW causes traffic coruption, nic-mode is configured only
+ * in the HW or needs to be configured. Since the transition from nic-mode to
+ * offload-mode in HW causes traffic corruption, nic-mode is configured only
- * in ports on which storage services where never requested.
+ * in ports on which storage services were never requested.
*/
#define CONFIGURE_NIC_MODE(sc) (!CHIP_IS_E1x(sc) && !CNIC_ENABLED(sc))
/*
* Prepare the first stats ramrod (will be completed with
- * the counters equal to zero) - init counters to somethig different.
+ * the counters equal to zero) - init counters to something different.
*/
memset(&sc->fw_stats_data->storm_counters, 0xff,
sizeof(struct stats_counter));
};
struct bnx2x_eth_q_stats_old {
- /* Fields to perserve over fw reset*/
+ /* Fields to preserve over FW reset */
uint32_t total_unicast_bytes_received_hi;
uint32_t total_unicast_bytes_received_lo;
uint32_t total_broadcast_bytes_received_hi;
uint32_t total_multicast_bytes_transmitted_hi;
uint32_t total_multicast_bytes_transmitted_lo;
- /* Fields to perserve last of */
+ /* Fields to preserve last of */
uint32_t total_bytes_received_hi;
uint32_t total_bytes_received_lo;
uint32_t total_bytes_transmitted_hi;
tl->length = length;
}
-/* Initiliaze header of the first tlv and clear mailbox*/
+/* Initialize header of the first TLV and clear mailbox */
static void
bnx2x_vf_prep(struct bnx2x_softc *sc, struct vf_first_tlv *first_tlv,
uint16_t type, uint16_t length)
uint8_t pad[2];
};
-/* rlease the VF's acquired resources */
+/* release the VF's acquired resources */
struct vf_release_tlv {
struct vf_first_tlv first_tlv;
uint16_t vf_id; /* for debug */
/* temporarily used for RTT */
#define XSEMI_CLK1_RESUL_CHIP (1e-3)
-/* used for Host Coallescing */
+/* used for Host Coalescing */
#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
#define TSDM_TIMER_TICK_RESUL_CHIP (1 * (1e-6))
#define PORT_FEATURE_MBA_LINK_SPEED_20G 0x20000000
/* Secondary MBA configuration,
- * see mba_config for the fileds defination.
+ * see mba_config for the field definitions.
*/
uint32_t mba_config2;
#define PORT_FEATURE_BOFM_CFGD_VEN 0x00080000
/* Secondary MBA configuration,
- * see mba_vlan_cfg for the fileds defination.
+ * see mba_vlan_cfg for the field definitions.
*/
uint32_t mba_vlan_cfg2;
#define EXTENDED_DEV_INFO_SHARED_CFG_DBG_GEN3_COMPLI_ENA 0x00080000
/* Override Rx signal detect threshold when enabled the threshold
- * will be set staticaly
+ * will be set statically
*/
#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_MASK 0x00100000
#define EXTENDED_DEV_INFO_SHARED_CFG_OVERRIDE_RX_SIG_SHIFT 20
* elements on a per byte or word boundary.
*
* example: an array with 8 entries each 4 bit wide. This array will fit into
- * a single dword. The diagrmas below show the array order of the nibbles.
+ * a single dword. The diagrams below show the array order of the nibbles.
*
- * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the stadard ordering:
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
*
* | | | |
* 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
};
/*
- * Used to suppoert NSCI get OS driver version
+ * Used to support NSCI get OS driver version
* On driver load the version value will be set
* On driver unload driver value of 0x0 will be set
*/
struct os_drv_ver {
#define DRV_VER_NOT_LOADED 0
- /*personalites orrder is importent */
+ /* personalities order is important */
#define DRV_PERS_ETHERNET 0
#define DRV_PERS_ISCSI 1
#define DRV_PERS_FCOE 2
- /*shmem2 struct is constatnt can't add more personalites here*/
+ /* shmem2 struct is constant, can't add more personalities here */
#define MAX_DRV_PERS 3
uint32_t versions[MAX_DRV_PERS];
};
/* Flag to the driver that PF's drv_info_host_addr buffer was read */
uint32_t mfw_drv_indication; /* Offset 0x19c */
- /* We use inidcation for each PF (0..3) */
+ /* We use indication for each PF (0..3) */
#define MFW_DRV_IND_READ_DONE_OFFSET(_pf_) (1 << (_pf_))
union { /* For various OEMs */ /* Offset 0x1a0 */
/*
- * Segment types for host coaslescing
+ * Segment types for host coalescing
*/
enum hc_segment {
HC_REGULAR_SEGMENT,
/*
- * IGU block operartion modes (in Everest2)
+ * IGU block operation modes (in Everest2)
*/
enum igu_mode {
HC_IGU_BC_MODE,
/*
- * Types of statistcis query entry
+ * Types of statistics query entry
*/
enum stats_query_type {
STATS_TYPE_QUEUE,
/*
- * Taffic types used in ETS and flow control algorithms
+ * Traffic types used in ETS and flow control algorithms
*/
enum traffic_type {
LLFC_TRAFFIC_TYPE_NW,
REG_WR(sc, PXP2_REG_WR_CDU_MPS, val);
}
- /* Validate number of tags suppoted by device */
+ /* Validate number of tags supported by device */
#define PCIE_REG_PCIER_TL_HDR_FC_ST 0x2980
val = REG_RD(sc, PCIE_REG_PCIER_TL_HDR_FC_ST);
val &= 0xFF;
for (i = ilt_cli->start; i <= ilt_cli->end; i++)
ecore_ilt_line_init_op(sc, ilt, i, initop);
- /* init/clear the ILT boundries */
+ /* init/clear the ILT boundaries */
ecore_ilt_boundary_init_op(sc, ilt_cli, ilt->start_line, initop);
}
/*
* called during init common stage, ilt clients should be initialized
- * prioir to calling this function
+ * prior to calling this function
*/
static void ecore_ilt_init_page_size(struct bnx2x_softc *sc, uint8_t initop)
{
#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT (0x1 << 3)
#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR (0x1 << 4)
#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND (0x1 << 1)
-/* [R 1] ATC initalization done */
+/* [R 1] ATC initialization done */
#define ATC_REG_ATC_INIT_DONE 0x1100bc
/* [RW 6] Interrupt mask register #0 read/write */
#define ATC_REG_ATC_INT_MASK 0x1101c8
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 0x60078
/* [RW 10] Write client 0: Assert pause threshold. Not Functional */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
-/* [R 24] The number of full blocks occpied by port. */
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
#define CCM_REG_CAM_OCCUP 0xd0188
#define IGU_REG_PCI_PF_MSIX_FUNC_MASK 0x130148
#define IGU_REG_PCI_PF_MSI_EN 0x130140
/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
- * pending; 1 = pending. Pendings means interrupt was asserted; and write
+ * pending; 1 = pending. Pending means interrupt was asserted; and write
* done was not received. Data valid only in addresses 0-4. all the rest are
* zero.
*/
/* [R 28] this field hold the last information that caused reserved
* attention. bits [19:0] - address; [22:20] function; [23] reserved;
* [27:24] the master that caused the attention - according to the following
- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
* dbu; 8 = dmae
*/
#define MISC_REG_GRC_RSV_ATTN 0xa3c0
/* [R 28] this field hold the last information that caused timeout
* attention. bits [19:0] - address; [22:20] function; [23] reserved;
* [27:24] the master that caused the attention - according to the following
- * encodeing:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+ * encoding:1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
* dbu; 8 = dmae
*/
#define MISC_REG_GRC_TIMEOUT_ATTN 0xa3c4
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_LLH_PTP_PARAM_MASK 0x187a0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P0_TLLH_PTP_PARAM_MASK 0x187f0
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_LLH_PTP_PARAM_MASK 0x187c8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
* MAC DA 2. The reset default is set to mask out all parameters.
*/
#define NIG_REG_P1_TLLH_PTP_PARAM_MASK 0x187f8
-/* [RW 14] Mask regiser for the rules used in detecting PTP packets. Set
+/* [RW 14] Mask register for the rules used in detecting PTP packets. Set
* each bit to 1 to mask out that particular rule. 0-{IPv4 DA 0; UDP DP 0} .
* 1-{IPv4 DA 0; UDP DP 1} . 2-{IPv4 DA 1; UDP DP 0} . 3-{IPv4 DA 1; UDP DP
* 1} . 4-{IPv6 DA 0; UDP DP 0} . 5-{IPv6 DA 0; UDP DP 1} . 6-{IPv6 DA 1;
#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
#define PBF_REG_HDRS_AFTER_TAG_0 0x15c0b8
-/* [R 1] Removed for E3 B0 - Indicates which COS is conncted to the highest
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
* priority in the command arbiter.
*/
#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
*/
#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
/* [R 11] Removed for E3 B0 - Port 0 threshold used by arbiter in 16 byte
- * lines used when pause not suppoterd.
+ * lines used when pause not supported.
*/
#define PBF_REG_P0_ARB_THRSH 0x1400e4
/* [R 11] Removed for E3 B0 - Current credit for port 0 in the tx port
* queues.
*/
#define QM_REG_OVFERROR 0x16805c
-/* [RC 6] the Q were the qverflow occurs */
+/* [RC 6] the Q where the overflow occurs */
#define QM_REG_OVFQNUM 0x168058
/* [R 16] Pause state for physical queues 15-0 */
#define QM_REG_PAUSESTATE0 0x168410
if set, generate pcie_err_attn output when this error is seen. WC \
*/
#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2 \
- (1 << 3) /* Receive UR Statusfor Function 2. If set, generate \
+ (1 << 3) /* Receive UR Status for Function 2. If set, generate \
pcie_err_attn output when this error is seen. WC */
#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2 \
(1 << 2) /* Completer Timeout Status Status for Function 2, if \
if set, generate pcie_err_attn output when this error is seen. WC \
*/
#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5 \
- (1 << 3) /* Receive UR Statusfor Function 5. If set, generate \
+ (1 << 3) /* Receive UR Status for Function 5. If set, generate \
pcie_err_attn output when this error is seen. WC */
#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5 \
(1 << 2) /* Completer Timeout Status Status for Function 5, if \
if (rc != ECORE_SUCCESS) {
__ecore_vlan_mac_h_pend(sc, o, *ramrod_flags);
- /** Calling function should not diffrentiate between this case
+ /** Calling function should not differentiate between this case
* and the case in which there is already a pending ramrod
*/
rc = ECORE_PENDING;
union {
ecore_list_t macs_head;
uint32_t macs_num; /* Needed for DEL command */
- int next_bin; /* Needed for RESTORE flow with aprox match */
+ int next_bin; /* Needed for RESTORE flow with approx match */
} data;
int done; /* set to TRUE, when the command has been handled,
} else {
/*
- * CAM credit is equaly divided between all active functions
+ * CAM credit is equally divided between all active functions
* on the PATH.
*/
if (func_num > 0) {
RAMROD_RESTORE,
/* Execute the next command now */
RAMROD_EXEC,
- /* Don't add a new command and continue execution of posponed
+ /* Don't add a new command and continue execution of postponed
* commands. If not set a new command will be added to the
* pending commands list.
*/
/* Last configured indirection table */
uint8_t ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
- /* flags for enabling 4-tupple hash on UDP */
+ /* flags for enabling 4-tuple hash on UDP */
uint8_t udp_rss_v4;
uint8_t udp_rss_v6;
#define ECORE_MULTI_TX_COS_E3B0 3
#define ECORE_MULTI_TX_COS 3 /* Maximum possible */
#define MAC_PAD (ECORE_ALIGN(ETH_ALEN, sizeof(uint32_t)) - ETH_ALEN)
-/* DMAE channel to be used by FW for timesync workaroun. A driver that sends
+/* DMAE channel to be used by FW for timesync workaround. A driver that sends
* timesync-related ramrods must not use this DMAE command ID.
*/
#define FW_DMAE_CMD_ID 6
}
/******************************************************************************
* Description:
- * E3B0 disable will return basicly the values to init values.
+ * E3B0 disable will basically return the values to the init values.
*.
******************************************************************************/
static elink_status_t elink_ets_e3b0_disabled(const struct elink_params *params,
/******************************************************************************
* Description:
- * Disable will return basicly the values to init values.
+ * Disable will basically return the values to the init values.
*
******************************************************************************/
elink_status_t elink_ets_disabled(struct elink_params *params,
/******************************************************************************
* Description
- * Set the COS mappimg to SP and BW until this point all the COS are not
+ * Set the COS mapping to SP and BW. Until this point all the COS are not
* set as SP or BW.
******************************************************************************/
static elink_status_t elink_ets_e3b0_cli_map(const struct elink_params *params,
}
ELINK_DEBUG_P0(sc,
"elink_ets_E3B0_config total BW should be 100");
- /* We can handle a case whre the BW isn't 100 this can happen
+ /* We can handle a case where the BW isn't 100; this can happen
* if the TC are joined.
*/
}
REG_WR(sc, NIG_REG_EGRESS_EMAC0_PORT + port * 4, 1);
#ifdef ELINK_INCLUDE_EMUL
- /* for paladium */
+ /* for palladium */
if (CHIP_REV_IS_EMUL(sc)) {
/* Use lane 1 (of lanes 0-3) */
REG_WR(sc, NIG_REG_XGXS_LANE_SEL_P0 + port * 4, 1);
/* Set Time (based unit is 512 bit time) between automatic
- * re-sending of PP packets amd enable automatic re-send of
+ * re-sending of PP packets and enable automatic re-send of
- * Per-Priroity Packet as long as pp_gen is asserted and
+ * Per-Priority Packet as long as pp_gen is asserted and
* pp_disable is low.
*/
val = 0x8000;
}
/**
- * elink_get_emac_base - retrive emac base address
+ * elink_get_emac_base - retrieve emac base address
*
* @bp: driver handle
* @mdc_mdio_access: access type
elink_cl45_write(sc, phy, reg_set[i].devad, reg_set[i].reg,
reg_set[i].val);
- /* Start KR2 work-around timer which handles BNX2X8073 link-parner */
+ /* Start KR2 work-around timer which handles BNX2X8073 link-partner */
params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE;
elink_update_link_attr(params, params->link_attr_sync);
}
* hence its link is expected to be down
* - SECOND_PHY means that first phy should not be able
* to link up by itself (using configuration)
- * - DEFAULT should be overridden during initialiazation
+ * - DEFAULT should be overridden during initialization
*/
ELINK_DEBUG_P1(sc, "Invalid link indication"
" mpc=0x%x. DISABLING LINK !!!",
ELINK_DEBUG_P0(sc, "FW cmd failed.");
return ELINK_STATUS_ERROR;
}
- /* Step5: Once the command has completed, read the specficied DATA
+ /* Step5: Once the command has completed, read the specified DATA
* registers for any saved results for the command, if applicable
*/
int rc;
if (!BNXT_PF(bp)) {
- PMD_DRV_LOG(ERR, "Attempt to allcoate VFs on a VF!\n");
+ PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
return -EINVAL;
}
}
/**
- * Allocates zero'ed memory from the heap.
+ * Allocates zeroed memory from the heap.
*
* Returns success or failure code.
*/
uint32_t max_flows);
/**
- * Allocates zero'ed memory from the heap.
+ * Allocates zeroed memory from the heap.
*
* NOTE: Also performs virt2phy address conversion by default thus is
* can be expensive to invoke.
/** Maximum number of LACP packets from one slave queued in TX ring. */
#define BOND_MODE_8023AX_SLAVE_TX_PKTS 1
/**
- * Timeouts deffinitions (5.4.4 in 802.1AX documentation).
+ * Timeout definitions (5.4.4 in 802.1AX documentation).
*/
#define BOND_8023AD_FAST_PERIODIC_MS 900
#define BOND_8023AD_SLOW_PERIODIC_MS 29000
uint16_t slave_count; /**< Number of bonded slaves */
struct bond_slave_details slaves[RTE_MAX_ETHPORTS];
- /**< Arary of bonded slaves details */
+ /**< Array of bonded slaves details */
struct mode8023ad_private mode4;
uint16_t tlb_slaves_order[RTE_MAX_ETHPORTS];
{
/* Record default parameters for partner. Partner admin parameters
* are not implemented so set them to arbitrary default (last known) and
- * mark actor that parner is in defaulted state. */
+ * mark actor that partner is in defaulted state. */
port->partner_state = STATE_LACP_ACTIVE;
ACTOR_STATE_SET(port, DEFAULTED);
}
MODE4_DEBUG("LACP -> CURRENT\n");
BOND_PRINT_LACP(lacp);
/* Update selected flag. If partner parameters are defaulted assume they
- * are match. If not defaulted compare LACP actor with ports parner
+ * are matched. If not defaulted compare LACP actor with port's partner
* params. */
if (!ACTOR_STATE(port, DEFAULTED) &&
(ACTOR_STATE(port, AGGREGATION) != PARTNER_STATE(port, AGGREGATION)
PARTNER_STATE(port, LACP_ACTIVE);
uint8_t is_partner_fast, was_partner_fast;
- /* No periodic is on BEGIN, LACP DISABLE or when both sides are pasive */
+ /* No periodic is on BEGIN, LACP DISABLE or when both sides are passive */
if (SM_FLAG(port, BEGIN) || !SM_FLAG(port, LACP_ENABLED) || !active) {
timer_cancel(&port->periodic_timer);
timer_force_expired(&port->tx_machine_timer);
SM_FLAG_CLR(port, PARTNER_SHORT_TIMEOUT);
MODE4_DEBUG("-> NO_PERIODIC ( %s%s%s)\n",
- SM_FLAG(port, BEGIN) ? "begind " : "",
+ SM_FLAG(port, BEGIN) ? "begin " : "",
SM_FLAG(port, LACP_ENABLED) ? "" : "LACP disabled ",
- active ? "LACP active " : "LACP pasive ");
+ active ? "LACP active " : "LACP passive ");
return;
}
if ((ACTOR_STATE(port, DISTRIBUTING) || ACTOR_STATE(port, COLLECTING)) &&
!PARTNER_STATE(port, SYNCHRONIZATION)) {
/* If in COLLECTING or DISTRIBUTING state and partner becomes out of
- * sync transit to ATACHED state. */
+ * sync transit to ATTACHED state. */
ACTOR_STATE_CLR(port, DISTRIBUTING);
ACTOR_STATE_CLR(port, COLLECTING);
- /* Clear actor sync to activate transit ATACHED in condition bellow */
+ /* Clear actor sync to activate transit to ATTACHED in condition below */
ACTOR_STATE_CLR(port, SYNCHRONIZATION);
MODE4_DEBUG("Out of sync -> ATTACHED\n");
}
/* Search for aggregator suitable for this port */
for (i = 0; i < slaves_count; ++i) {
agg = &bond_mode_8023ad_ports[slaves[i]];
- /* Skip ports that are not aggreagators */
+ /* Skip ports that are not aggregators */
if (agg->aggregator_port_id != slaves[i])
continue;
SM_FLAG_SET(port, BEGIN);
- /* LACP is disabled on half duples or link is down */
+ /* LACP is disabled on half duplex or link is down */
if (SM_FLAG(port, LACP_ENABLED)) {
/* If port was enabled set it to BEGIN state */
SM_FLAG_CLR(port, LACP_ENABLED);
port->partner_state = STATE_LACP_ACTIVE | STATE_AGGREGATION;
port->sm_flags = SM_FLAGS_BEGIN;
- /* use this port as agregator */
+ /* use this port as aggregator */
port->aggregator_port_id = slave_id;
if (bond_mode_8023ad_register_lacp_mac(slave_id) < 0) {
struct rte_ether_addr system;
/**< System ID - Slave MAC address, same as bonding MAC address */
uint16_t key;
- /**< Speed information (implementation dependednt) and duplex. */
+ /**< Speed information (implementation dependent) and duplex. */
uint16_t port_priority;
/**< Priority of this (unused in current implementation) */
uint16_t port_number;
* @param port_id Bonding device id
*
* @return
- * agregator mode on success, negative value otherwise
+ * aggregator mode on success, negative value otherwise
*/
int
rte_eth_bond_8023ad_agg_selection_get(uint16_t port_id);
* @param internals Bonding data.
*
* @return
- * Index of slawe on which packet should be sent.
+ * Index of slave on which packet should be sent.
*/
uint16_t
bond_mode_alb_arp_upd(struct client_data *client_info,
* value. Thus, the new internal value of default Rx queue offloads
* has to be masked by rx_queue_offload_capa to make sure that only
* commonly supported offloads are preserved from both the previous
- * value and the value being inhereted from the new slave device.
+ * value and the value being inherited from the new slave device.
*/
rxconf_i->offloads = (rxconf_i->offloads | rxconf->offloads) &
internals->rx_queue_offload_capa;
* value. Thus, the new internal value of default Tx queue offloads
* has to be masked by tx_queue_offload_capa to make sure that only
* commonly supported offloads are preserved from both the previous
- * value and the value being inhereted from the new slave device.
+ * value and the value being inherited from the new slave device.
*/
txconf_i->offloads = (txconf_i->offloads | txconf->offloads) &
internals->tx_queue_offload_capa;
void *userdata;
/* Rlen computation data */
struct cnxk_ipsec_outb_rlens rlens;
- /* Back pinter to eth sec session */
+ /* Back pointer to eth sec session */
struct cnxk_eth_sec_sess *eth_sec;
/* SA index */
uint32_t sa_idx;
/* Retrieving the default desc values */
lmt[off] = cmd[2];
- /* Using compiler barier to avoid voilation of C
+ /* Using compiler barrier to avoid violation of C
* aliasing rules.
*/
rte_compiler_barrier();
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * next 8 bytes as it corrupts the actual Tx tstamp registered
* address.
*/
send_mem->w0.subdc = NIX_SUBDC_MEM;
}
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
- /* Tx ol_flag for timestam. */
+ /* Tx ol_flag for timestamp. */
const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
/* Retrieving the default desc values */
cmd[off] = send_mem_desc[6];
- /* Using compiler barier to avoid voilation of C
+ /* Using compiler barrier to avoid violation of C
* aliasing rules.
*/
rte_compiler_barrier();
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * next 8 bytes as it corrupts the actual Tx tstamp registered
* address.
*/
send_mem->w0.cn9k.alg =
}
if (flags & NIX_TX_OFFLOAD_TSTAMP_F) {
- /* Tx ol_flag for timestam. */
+ /* Tx ol_flag for timestamp. */
const uint64x2_t olf = {RTE_MBUF_F_TX_IEEE1588_TMST,
RTE_MBUF_F_TX_IEEE1588_TMST};
/* Set send mem alg to SUB. */
/* This API returns the raw PTP HI clock value. Since LFs do not
* have direct access to PTP registers and it requires mbox msg
* to AF for this value. In fastpath reading this value for every
- * packet (which involes mbox call) becomes very expensive, hence
+ * packet (which involves mbox call) becomes very expensive, hence
* we should be able to derive PTP HI clock value from tsc by
* using freq_mult and clk_delta calculated during configure stage.
*/
}
/*
- * @ret : > 0 filter destroyed succsesfully
+ * @ret : > 0 filter destroyed successfully
* < 0 error destroying filter
* == 1 filter not active / not found
*/
*/
pmask_nports = hweight32(adapter->params.vfres.pmask);
if (pmask_nports < adapter->params.nports) {
- dev_warn(adapter->pdev_dev, "only using %d of %d provissioned"
+ dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
" virtual interfaces; limited by Port Access Rights"
" mask %#x\n", pmask_nports, adapter->params.nports,
adapter->params.vfres.pmask);
* @fl: the Free List
*
* Tests specified Free List to see whether the number of buffers
- * available to the hardware has falled below our "starvation"
+ * available to the hardware has fallen below our "starvation"
* threshold.
*/
static inline bool fl_starving(const struct adapter *adapter,
* @q: the Tx queue
* @n: number of new descriptors to give to HW
*
- * Ring the doorbel for a Tx queue.
+ * Ring the doorbell for a Tx queue.
*/
static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q)
{
}
/**
- * should_tx_packet_coalesce - decides wether to coalesce an mbuf or not
+ * should_tx_packet_coalesce - decides whether to coalesce an mbuf or not
* @txq: tx queue where the mbuf is sent
* @mbuf: mbuf to be sent
* @nflits: return value for number of flits needed
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
* of the Egress Queue Unit and at least 2 Egress Units larger
- * than the SGE's Egress Congrestion Threshold
+ * than the SGE's Egress Congestion Threshold
* (fl_starve_thres - 1).
*/
if (fl->size < s->fl_starve_thres - 1 + 2 * 8)
QM_FQCTRL_CTXASTASHING |
QM_FQCTRL_PREFERINCACHE;
opts.fqd.context_a.stashing.exclusive = 0;
- /* In muticore scenario stashing becomes a bottleneck on LS1046.
+ /* In multicore scenario stashing becomes a bottleneck on LS1046.
* So do not enable stashing in this case
*/
if (dpaa_svr_family != SVR_LS1046A_FAMILY)
dpaa_intf->name = dpaa_device->name;
- /* save fman_if & cfg in the interface struture */
+ /* save fman_if & cfg in the interface structure */
eth_dev->process_private = fman_intf;
dpaa_intf->ifid = dev_id;
dpaa_intf->cfg = cfg;
if (dpaa_svr_family == SVR_LS1043A_FAMILY)
dpaa_push_mode_max_queue = 0;
- /* if push mode queues to be enabled. Currenly we are allowing
+ /* if push mode queues are to be enabled. Currently we are allowing
* only one queue per thread.
*/
if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
void *ptr = rte_dpaa_mem_ptov(qm_fd_addr(&dq->fd));
/* In case of LS1046, annotation stashing is disabled due to L2 cache
- * being bottleneck in case of multicore scanario for this platform.
- * So we prefetch the annoation beforehand, so that it is available
+ * being bottleneck in case of multicore scenario for this platform.
+ * So we prefetch the annotation beforehand, so that it is available
* in cache when accessed.
*/
rte_prefetch0((void *)((uint8_t *)ptr + DEFAULT_RX_ICEOF));
#define FM_FD_ERR_PRS_HDR_ERR 0x00000020
/**< Header error was identified during parsing */
#define FM_FD_ERR_BLOCK_LIMIT_EXCEEDED 0x00000008
- /**< Frame parsed beyind 256 first bytes */
+ /**< Frame parsed beyond 256 first bytes */
#define FM_FD_TX_STATUS_ERR_MASK (FM_FD_ERR_UNSUPPORTED_FORMAT | \
FM_FD_ERR_LENGTH | \
} ioc_fm_pcd_counters_params_t;
/*
- * @Description structure for FM exception definitios
+ * @Description structure for FM exception definitions
*/
typedef struct ioc_fm_pcd_exception_params_t {
ioc_fm_pcd_exceptions exception; /**< The requested exception */
e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET, /**< Ethernet/802.3 MAC */
e_IOC_FM_PCD_MANIP_HDR_RMV_STACKED_QTAGS, /**< stacked QTags */
e_IOC_FM_PCD_MANIP_HDR_RMV_ETHERNET_AND_MPLS,
- /**< MPLS and Ethernet/802.3 MAC header unitl the header
+ /**< MPLS and Ethernet/802.3 MAC header until the header
* which follows the MPLS header
*/
e_IOC_FM_PCD_MANIP_HDR_RMV_MPLS
/*
* @Function fm_pcd_net_env_characteristics_delete
*
- * @Description Deletes a set of Network Environment Charecteristics.
+ * @Description Deletes a set of Network Environment Characteristics.
*
* @Param[in] ioc_fm_obj_t The id of a Network Environment object.
*
* @Return 0 on success; Error code otherwise.
*
* @Cautions Allowed only following fm_pcd_match_table_set() not only of
- * the relevnt node but also the node that points to this node.
+ * the relevant node but also the node that points to this node.
*/
#define FM_PCD_IOC_MATCH_TABLE_MODIFY_KEY_AND_NEXT_ENGINE \
_IOW(FM_IOC_TYPE_BASE, FM_PCD_IOC_NUM(35), \
/**< Number of bytes from beginning of packet to start parsing
*/
ioc_net_header_type first_prs_hdr;
- /**< The type of the first header axpected at 'parsing_offset'
+ /**< The type of the first header expected at 'parsing_offset'
*/
bool include_in_prs_statistics;
/**< TRUE to include this port in the parser statistics */
} ioc_fm_port_pcd_prs_params_t;
/*
- * @Description A structure for defining coarse alassification parameters
+ * @Description A structure for defining coarse classification parameters
* (Must match t_fm_portPcdCcParams defined in fm_port_ext.h)
*/
typedef struct ioc_fm_port_pcd_cc_params_t {
/**< Number of bytes from beginning of packet to start parsing
*/
ioc_net_header_type first_prs_hdr;
- /**< The type of the first header axpected at 'parsing_offset'
+ /**< The type of the first header expected at 'parsing_offset'
*/
} ioc_fm_pcd_prs_start_t;
#define FM_PORT_FRM_ERR_PRS_HDR_ERR FM_FD_ERR_PRS_HDR_ERR
/**< Header error was identified during parsing */
#define FM_PORT_FRM_ERR_BLOCK_LIMIT_EXCEEDED FM_FD_ERR_BLOCK_LIMIT_EXCEEDED
- /**< Frame parsed beyind 256 first bytes */
+ /**< Frame parsed beyond 256 first bytes */
#define FM_PORT_FRM_ERR_PROCESS_TIMEOUT 0x00000001
/**< FPM Frame Processing Timeout Exceeded */
/* @} */
* @Param[in] length length of received data
* @Param[in] status receive status and errors
* @Param[in] position position of buffer in frame
- * @Param[in] h_buf_context A handle of the user acossiated with this buffer
+ * @Param[in] h_buf_context A handle of the user associated with this buffer
*
* @Retval e_RX_STORE_RESPONSE_CONTINUE
* order the driver to continue Rx operation for all ready data.
* @Param[in] p_data A pointer to data received
* @Param[in] status transmit status and errors
* @Param[in] last_buffer is last buffer in frame
- * @Param[in] h_buf_context A handle of the user acossiated with this buffer
+ * @Param[in] h_buf_context A handle of the user associated with this buffer
*/
typedef void (t_fm_port_im_tx_conf_callback) (t_handle h_app,
uint8_t *p_data,
bool pfc_prio_enable[FM_NUM_CONG_GRPS][FM_MAX_PFC_PRIO];
/**< a matrix that represents the map between the CG ids
* defined in 'congestion_grps_to_consider' to the
- * priorties mapping array.
+ * priorities mapping array.
*/
} t_fm_port_congestion_grps;
PMD_INIT_FUNC_TRACE();
if (mask & RTE_ETH_VLAN_FILTER_MASK) {
- /* VLAN Filter not avaialble */
+ /* VLAN Filter not available */
if (!priv->max_vlan_filters) {
DPAA2_PMD_INFO("VLAN filter not available");
return -ENOTSUP;
cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
cong_notif_cfg.threshold_entry = nb_tx_desc;
/* Notify that the queue is not congested when the data in
- * the queue is below this thershold.(90% of value)
+ * the queue is below this threshold. (90% of value)
*/
cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
cong_notif_cfg.message_ctx = 0;
* Dpaa2 link Interrupt handler
*
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
ocfg.oa = 1;
/* Late arrival window size disabled */
ocfg.olws = 0;
- /* ORL resource exhaustaion advance NESN disabled */
+ /* ORL resource exhaustion advance NESN disabled */
ocfg.oeane = 0;
/* Loose ordering enabled */
ocfg.oloe = 1;
}
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
- /*Init fields w.r.t. classficaition*/
+ /* Init fields w.r.t. classification */
memset(&priv->extract.qos_key_extract, 0,
sizeof(struct dpaa2_key_extract));
priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
if (!priv->extract.qos_extract_param) {
DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
- " classificaiton ", ret);
+ " classification ", ret);
goto init_err;
}
priv->extract.qos_key_extract.key_info.ipv4_src_offset =
priv->extract.tc_extract_param[i] =
(size_t)rte_malloc(NULL, 256, 64);
if (!priv->extract.tc_extract_param[i]) {
- DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
+ DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
ret);
goto init_err;
}
#define DPAA2_FLOW_MAX_KEY_SIZE 16
-/*Externaly defined*/
+/* Externally defined */
extern const struct rte_flow_ops dpaa2_flow_ops;
extern const struct rte_tm_ops dpaa2_tm_ops;
flow, pattern, &local_cfg,
device_configured, group);
if (ret) {
- DPAA2_PMD_ERR("IP discrimation failed!");
+ DPAA2_PMD_ERR("IP discrimination failed!");
return -1;
}
(actions[j].conf);
if (rss_conf->queue_num > priv->dist_queues) {
DPAA2_PMD_ERR(
- "RSS number exceeds the distrbution size");
+ "RSS number exceeds the distribution size");
return -ENOTSUP;
}
for (i = 0; i < (int)rss_conf->queue_num; i++) {
qos_cfg.keep_entries = true;
qos_cfg.key_cfg_iova =
(size_t)priv->extract.qos_extract_param;
- /* QoS table is effecitive for multiple TCs.*/
+ /* QoS table is effective for multiple TCs. */
if (priv->num_rx_tc > 1) {
ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
priv->token, &qos_cfg);
0, 0);
if (ret < 0) {
DPAA2_PMD_ERR(
- "Error in addnig entry to QoS table(%d)", ret);
+ "Error in adding entry to QoS table(%d)", ret);
return ret;
}
}
mask_iova = (void *)((size_t)key_iova + DIST_PARAM_IOVA_SIZE);
/* Currently taking only IP protocol as an extract type.
- * This can be exended to other fields using pattern->type.
+ * This can be extended to other fields using pattern->type.
*/
memset(&kg_cfg, 0, sizeof(struct dpkg_profile_cfg));
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
if (*dpaa2_seqn(*bufs)) {
/* Use only queue 0 for Tx in case of atomic/
* ordered packets as packets can get unordered
- * when being tranmitted out from the interface
+ * when being transmitted out from the interface
*/
dpaa2_set_enqueue_descriptor(order_sendq,
(*bufs),
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
*/
#define DPNI_OPT_OPR_PER_TC 0x000080
/**
- * All Tx traffic classes will use a single sender (ignore num_queueus for tx)
+ * All Tx traffic classes will use a single sender (ignore num_queues for tx)
*/
#define DPNI_OPT_SINGLE_SENDER 0x000100
/**
* @page_3.ceetm_reject_bytes: Cumulative count of the number of bytes in all
* frames whose enqueue was rejected
* @page_3.ceetm_reject_frames: Cumulative count of all frame enqueues rejected
- * @page_4: congestion point drops for seleted TC
+ * @page_4: congestion point drops for selected TC
* @page_4.cgr_reject_frames: number of rejected frames due to congestion point
* @page_4.cgr_reject_bytes: number of rejected bytes due to congestion point
* @page_5: policer statistics per TC
* dpkg_prepare_key_cfg()
* @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
* '0' to use the 'default_tc' in such cases
- * @keep_entries: if set to one will not delele existing table entries. This
+ * @keep_entries: if set to one will not delete existing table entries. This
* option will work properly only for dpni objects created with
* DPNI_OPT_HAS_KEY_MASKING option. All previous QoS entries must
* be compatible with new key composition rule.
* @flow_id: Identifies the Rx queue used for matching traffic. Supported
* values are in range 0 to num_queue-1.
* @redirect_obj_token: token that identifies the object where frame is
- * redirected when this rule is hit. This paraneter is used only when one of the
+ * redirected when this rule is hit. This parameter is used only when one of the
* flags DPNI_FS_OPT_REDIRECT_TO_DPNI_RX or DPNI_FS_OPT_REDIRECT_TO_DPNI_TX is
* set.
* The token is obtained using dpni_open() API call. The object must stay
struct dpni_load_ss_cfg *cfg);
/**
- * dpni_eanble_sw_sequence() - Enables a software sequence in the parser
+ * dpni_enable_sw_sequence() - Enables a software sequence in the parser
* profile
* corresponding to the ingress or egress of the DPNI.
* @mc_io: Pointer to MC portal's I/O object
* Maximum number of Ring Descriptors.
*
* Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
- * desscriptors should meet the following condition:
+ * descriptors should meet the following condition:
* (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
*/
#define E1000_MIN_RING_DESC 32
};
/*
- * Structure to store filters'info.
+ * Structure to store filters' info.
*/
struct e1000_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/*
* Starting with 631xESB hw supports 2 TX/RX queues per port.
- * Unfortunatelly, all these nics have just one TX context.
- * So we have few choises for TX:
+ * Unfortunately, all these nics have just one TX context.
+ * So we have few choices for TX:
* - Use just one TX queue.
* - Allow cksum offload only for one TX queue.
* - Don't allow TX cksum offload at all.
* (Multiple Receive Queues are mutually exclusive with UDP
* fragmentation and are not supported when a legacy receive
* descriptor format is used).
- * Which means separate RX routinies - as legacy nics (82540, 82545)
+ * Which means separate RX routines - as legacy nics (82540, 82545)
* don't support extended RXD.
* To avoid it we support just one RX queue for now (no RSS).
*/
}
/*
- * It executes link_update after knowing an interrupt is prsent.
+ * It executes link_update after knowing an interrupt is present.
*
* @param dev
* Pointer to struct rte_eth_dev.
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
struct em_ctx_info {
uint64_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask */
- union em_vlan_macip hdrlen; /**< L2 and L3 header lenghts */
+ union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
};
/**
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = igb_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "igb_check_mq_mode fails with %d.",
}
}
- /* confiugre msix for rx interrupt */
+ /* configure MSI-X for Rx interrupt */
eth_igb_configure_msix_intr(dev);
/* Configure for OS presence */
}
/*
- * It executes link_update after knowing an interrupt is prsent.
+ * It executes link_update after knowing an interrupt is present.
*
* @param dev
* Pointer to struct rte_eth_dev.
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be added.
+ * ntuple_filter: pointer to the filter that will be added.
*
* @return
* - On success, zero.
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be removed.
+ * ntuple_filter: pointer to the filter that will be removed.
*
* @return
* - On success, zero.
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be added.
+ * ntuple_filter: pointer to the filter that will be added.
*
* @return
* - On success, zero.
*
* @param
* dev: Pointer to struct rte_eth_dev.
- * ntuple_filter: ponter to the filter that will be removed.
+ * ntuple_filter: pointer to the filter that will be removed.
*
* @return
* - On success, zero.
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
E1000_WRITE_REG(hw, E1000_ETQF(E1000_ETQF_FILTER_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
E1000_WRITE_REG(hw, E1000_TIMINCA, 0);
return 0;
struct igb_rss_filter_list igb_filter_rss_list;
/**
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
/**
* Check if the flow rule is supported by igb.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
* the HW. Because there can be no enough room for the rule.
*/
static int
else
E1000_WRITE_REG(hw, E1000_DTXSWC, E1000_DTXSWC_VMDQ_LOOPBACK_EN);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
rah = E1000_READ_REG(hw, E1000_RAH(0));
rah &= ~ (0xFF << E1000_RAH_POOLSEL_SHIFT);
E1000_WRITE_REG(hw, E1000_RAH(0), rah);
(TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
/**
- * Strucutre to check if new context need be built
+ * Structure to check if a new context needs to be built
*/
struct igb_advctx_info {
uint64_t flags; /**< ol_flags related to context build. */
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
* Maximum number of Ring Descriptors.
*
* Since RDLEN/TDLEN should be multiple of 128bytes, the number of ring
- * desscriptors should meet the following condition:
+ * descriptors should meet the following condition:
* (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
*/
sw_ring[tx_id].last_id = tx_id;
}
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
} while (tx_id != tx_next);
/* Walk the list and find the next mbuf, if any. */
do {
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
if (sw_ring[tx_id].mbuf)
igb_rss_disable(dev);
- /* RCTL: eanble VLAN filter */
+ /* RCTL: enable VLAN filter */
rctl = E1000_READ_REG(hw, E1000_RCTL);
rctl |= E1000_RCTL_VFE;
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
++rxq->rx_stats.refill_partial;
}
- /* When we submitted free recources to device... */
+ /* When we submitted free resources to device... */
if (likely(i > 0)) {
/* ...let HW know that it can fill buffers with data. */
ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
/* While processing submitted and completed descriptors (rx and tx path
* respectively) in a loop it is desired to:
- * - perform batch submissions while populating sumbissmion queue
+ * - perform batch submissions while populating submission queue
* - avoid blocking transmission of other packets during cleanup phase
* Hence the utilization ratio of 1/8 of a queue size or max value if the size
* of the ring is very big - like 8k Rx rings.
#define RX_BD_CR ((ushort)0x0004) /* CRC or Frame error */
#define RX_BD_SH ((ushort)0x0008) /* Reserved */
#define RX_BD_NO ((ushort)0x0010) /* Rcvd non-octet aligned frame */
-#define RX_BD_LG ((ushort)0x0020) /* Rcvd frame length voilation */
+#define RX_BD_LG ((ushort)0x0020) /* Rcvd frame length violation */
#define RX_BD_FIRST ((ushort)0x0400) /* Reserved */
#define RX_BD_LAST ((ushort)0x0800) /* last buffer in the frame */
#define RX_BD_INT 0x00800000
return ENOTSUP;
}
- /* check that the suppied mask exactly matches capabilty */
+ /* check that the supplied mask exactly matches capability */
if (!mask_exact_match((const uint8_t *)&supported_mask,
(const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "IPv4 exact match mask");
return ENOTSUP;
}
- /* check that the suppied mask exactly matches capabilty */
+ /* check that the supplied mask exactly matches capability */
if (!mask_exact_match((const uint8_t *)&supported_mask,
(const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "UDP exact match mask");
return ENOTSUP;
}
- /* check that the suppied mask exactly matches capabilty */
+ /* check that the supplied mask exactly matches capability */
if (!mask_exact_match((const uint8_t *)&supported_mask,
(const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "TCP exact match mask");
}
/**
- * Build the intenal enic filter structure from the provided pattern. The
+ * Build the internal enic filter structure from the provided pattern. The
* pattern is validated as the items are copied.
*
* @param pattern[in]
* @param items_info[in]
* Info about this NICs item support, like valid previous items.
* @param enic_filter[out]
- * NIC specfilc filters derived from the pattern.
+ * NIC specific filters derived from the pattern.
* @param error[out]
*/
static int
}
/**
- * Build the intenal version 1 NIC action structure from the provided pattern.
+ * Build the internal version 1 NIC action structure from the provided pattern.
* The pattern is validated as the items are copied.
*
* @param actions[in]
* @param enic_action[out]
- * NIC specfilc actions derived from the actions.
+ * NIC specific actions derived from the actions.
* @param error[out]
*/
static int
}
/**
- * Build the intenal version 2 NIC action structure from the provided pattern.
+ * Build the internal version 2 NIC action structure from the provided pattern.
* The pattern is validated as the items are copied.
*
* @param actions[in]
* @param enic_action[out]
- * NIC specfilc actions derived from the actions.
+ * NIC specific actions derived from the actions.
* @param error[out]
*/
static int
}
/* NIC does not support GTP tunnels. No Items are allowed after this.
- * This prevents the specificaiton of further items.
+ * This prevents the specification of further items.
*/
arg->header_level = 0;
/*
* Use the raw L4 buffer to match GTP as fm_header_set does not have
- * GTP header. UDP dst port must be specifiec. Using the raw buffer
+ * GTP header. UDP dst port must be specified. Using the raw buffer
* does not affect such UDP item, since we skip UDP in the raw buffer.
*/
fm_data->fk_header_select |= FKH_L4RAW;
/* Remove trailing comma */
if (buf[0])
*(bp - 1) = '\0';
- ENICPMD_LOG(DEBUG, " Acions: %s", buf);
+ ENICPMD_LOG(DEBUG, " Actions: %s", buf);
}
static int
if (ret < 0 && ret != -ENOENT)
return rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
- NULL, "enic: rte_hash_lookup(aciton)");
+ NULL, "enic: rte_hash_lookup(action)");
if (ret == -ENOENT) {
/* Allocate a new action on the NIC. */
ENICPMD_FUNC_TRACE();
- /* Get or create an aciton handle. */
+ /* Get or create an action handle. */
ret = enic_action_handle_get(fm, action_in, error, &ah);
if (ret)
return ret;
}
/* If we were using interrupts, set the interrupt vector to -1
- * to disable interrupts. We are not disabling link notifcations,
+ * to disable interrupts. We are not disabling link notifications,
* though, as we want the polling of link status to continue working.
*/
if (enic->rte_dev->data->dev_conf.intr_conf.lsc)
* The app should not send oversized
* packets. tx_pkt_prepare includes a check as
* well. But some apps ignore the device max size and
- * tx_pkt_prepare. Oversized packets cause WQ errrors
+ * tx_pkt_prepare. Oversized packets cause WQ errors
* and the NIC ends up disabling the whole WQ. So
* truncate packets..
*/
#define FM10K_TX_MAX_MTU_SEG UINT8_MAX
/*
- * byte aligment for HW RX data buffer
+ * byte alignment for HW RX data buffer
* Datasheet requires RX buffer addresses shall either be 512-byte aligned or
* be 8-byte aligned but without crossing host memory pages (4KB alignment
* boundaries). Satisfy first option.
}
/*
- * disable RX queue, wait unitl HW finished necessary flush operation
+ * disable RX queue, wait until HW finished necessary flush operation
*/
static inline int
rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
}
/*
- * disable TX queue, wait unitl HW finished necessary flush operation
+ * disable TX queue, wait until HW finished necessary flush operation
*/
static inline int
tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
struct fm10k_dev_info *info =
FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
- /* Inialize bus info. Normally we would call fm10k_get_bus_info(), but
+ /* Initialize bus info. Normally we would call fm10k_get_bus_info(), but
* there is no way to get link status without reading BAR4. Until this
* works, assume we have maximum bandwidth.
* @todo - fix bus info
struct rte_eth_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf;
#ifndef RTE_FM10K_RX_OLFLAGS_ENABLE
- /* whithout rx ol_flags, no VP flag report */
+ /* without rx ol_flags, no VP flag report */
if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
return -1;
#endif
struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */
mb_def.nb_segs = 1;
- /* data_off will be ajusted after new mbuf allocated for 512-byte
+ /* data_off will be adjusted after new mbuf allocated for 512-byte
* alignment.
*/
mb_def.data_off = RTE_PKTMBUF_HEADROOM;
if (!(rxdp->d.staterr & FM10K_RXD_STATUS_DD))
return 0;
- /* Vecotr RX will process 4 packets at a time, strip the unaligned
+ /* Vector RX will process 4 packets at a time, strip the unaligned
* tails in case it's not multiple of 4.
*/
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_FM10K_DESCS_PER_LOOP);
_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
#if defined(RTE_ARCH_X86_64)
- /* B.1 load 2 64 bit mbuf poitns */
+ /* B.1 load 2 64 bit mbuf pointers */
mbp2 = _mm_loadu_si128((__m128i *)&mbufp[pos+2]);
#endif
fm10k_desc_to_pktype_v(descs0, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_FM10K_DESCS_PER_LOOP))
* Interrupt handler triggered by NIC for handling
* specific event.
*
- * @param: The address of parameter (struct rte_eth_dev *) regsitered before.
+ * @param: The address of parameter (struct rte_eth_dev *) registered before.
*/
static void hinic_dev_interrupt_handler(void *param)
{
return err;
}
- /* init vlan offoad */
+ /* init VLAN offload */
err = hinic_vlan_offload_set(dev,
RTE_ETH_VLAN_STRIP_MASK | RTE_ETH_VLAN_FILTER_MASK);
if (err) {
/*
* tunnel packet, mask must be 0xff, spec value is 1;
* normal packet, mask must be 0, spec value is 0;
- * if tunnal packet, ucode use
+	 * if tunnel packet, ucode uses
* sip/dip/protocol/src_port/dst_dport from inner packet
*/
u32 tunnel_flag:8;
* END
* other members in mask and spec should set to 0x00.
* item->last should be NULL.
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
/**
* Check if the flow rule is supported by nic.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
* the HW. Because there can be no enough room for the rule.
*/
static int hinic_flow_validate(struct rte_eth_dev *dev,
mbuf_pkt = *tx_pkts++;
queue_info = 0;
- /* 1. parse sge and tx offlod info from mbuf */
+ /* 1. parse sge and tx offload info from mbuf */
if (unlikely(!hinic_get_sge_txoff_info(mbuf_pkt,
&sqe_info, &off_info))) {
txq->txq_stats.off_errs++;
for (i = 0; i < MAX_CAPS_BIT; i++) {
if (!(caps_masked & BIT_ULL(i)))
continue;
- hns3_info(hw, "mask capabiliy: id-%u, name-%s.",
+ hns3_info(hw, "mask capability: id-%u, name-%s.",
i, hns3_get_caps_name(i));
}
}
return 0;
/*
- * Requiring firmware to enable some features, firber port can still
+ * Requiring firmware to enable some features, fiber port can still
* work without it, but copper port can't work because the firmware
* fails to take over the PHY.
*/
hw->intr_tqps_num = RTE_MIN(vec, hw->tqps_num);
for (i = 0; i < hw->intr_tqps_num; i++) {
/*
- * Set gap limiter/rate limiter/quanity limiter algorithm
+ * Set gap limiter/rate limiter/quantity limiter algorithm
* configuration for interrupt coalesce of queue's interrupt.
*/
hns3_set_queue_intr_gl(hw, i, HNS3_RING_GL_RX,
* IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
* Tick * (2 ^ IR_s)
*
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculation successful, negative: fail
*/
static int
hns3_shaper_para_calc(struct hns3_hw *hw, uint32_t ir, uint8_t shaper_level,
#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
const uint16_t tick_array[HNS3_SHAPER_LVL_CNT] = {
- 6 * 256, /* Prioriy level */
- 6 * 32, /* Prioriy group level */
+ 6 * 256, /* Priority level */
+ 6 * 32, /* Priority group level */
6 * 8, /* Port level */
6 * 256 /* Qset level */
};
ret = hns3_dcb_schd_setup_hw(hw);
if (ret) {
- hns3_err(hw, "dcb schdule configure failed! ret = %d", ret);
+ hns3_err(hw, "dcb schedule configure failed! ret = %d", ret);
return ret;
}
* hns3_dcb_pfc_enable - Enable priority flow control
* @dev: pointer to ethernet device
*
- * Configures the pfc settings for one porority.
+ * Configures the pfc settings for one priority.
*/
int
hns3_dcb_pfc_enable(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *pfc_conf)
hns3_set_bit(req->vport_vlan_cfg, HNS3_SHOW_TAG2_EN_B,
vcfg->vlan2_vlan_prionly ? 1 : 0);
- /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG1_EN_B,
vcfg->strip_tag1_discard_en ? 1 : 0);
hns3_set_bit(req->vport_vlan_cfg, HNS3_DISCARD_TAG2_EN_B,
vcfg->insert_tag2_en ? 1 : 0);
hns3_set_bit(req->vport_vlan_cfg, HNS3_CFG_NIC_ROCE_SEL_B, 0);
- /* firmwall will ignore this configuration for PCI_REVISION_ID_HIP08 */
+ /* firmware will ignore this configuration for PCI_REVISION_ID_HIP08 */
hns3_set_bit(req->vport_vlan_cfg, HNS3_TAG_SHIFT_MODE_EN_B,
vcfg->tag_shift_mode_en ? 1 : 0);
* hns3_rx_buffer_calc: calculate the rx private buffer size for all TCs
* @hw: pointer to struct hns3_hw
* @buf_alloc: pointer to buffer calculation data
- * @return: 0: calculate sucessful, negative: fail
+ * @return: 0: calculation successful, negative: fail
*/
static int
hns3_rx_buffer_calc(struct hns3_hw *hw, struct hns3_pkt_buf_alloc *buf_alloc)
}
/*
- * Validity of supported_speed for firber and copper media type can be
+ * Validity of supported_speed for fiber and copper media type can be
* guaranteed by the following policy:
* Copper:
* Although the initialization of the phy in the firmware may not be
-	 *    completed, the firmware can guarantees that the supported_speed is
-	 *    an valid value.
-	 * Firber:
+	 *    completed, the firmware can guarantee that the supported_speed is
+	 *    a valid value.
+	 * Fiber:
- * If the version of firmware supports the acitive query way of the
+ * If the version of firmware supports the active query way of the
* HNS3_OPC_GET_SFP_INFO opcode, the supported_speed can be obtained
* through it. If unsupported, use the SFP's speed as the value of the
* supported_speed.
/*
* Flow control auto-negotiation is not supported for fiber and
- * backpalne media type.
+ * backplane media type.
*/
case HNS3_MEDIA_TYPE_FIBER:
case HNS3_MEDIA_TYPE_BACKPLANE:
}
/*
- * FEC mode order defined in hns3 hardware is inconsistend with
+ * FEC mode order defined in hns3 hardware is inconsistent with
* that defined in the ethdev library. So the sequence needs
* to be converted.
*/
uint8_t tc_sch_mode; /* 0: sp; 1: dwrr */
uint8_t pgid;
uint32_t bw_limit;
- uint8_t up_to_tc_map; /* user priority maping on the TC */
+ uint8_t up_to_tc_map; /* user priority mapping on the TC */
};
struct hns3_dcb_info {
/*
* vlan mode.
* value range:
- * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHFIT_AND_DISCARD_MODE
+ * HNS3_SW_SHIFT_AND_DISCARD_MODE/HNS3_HW_SHIFT_AND_DISCARD_MODE
*
* - HNS3_SW_SHIFT_AND_DISCARD_MODE
* For some versions of hardware network engine, because of the
* hardware limitation, PMD needs to detect the PVID status
- * to work with haredware to implement PVID-related functions.
+ * to work with hardware to implement PVID-related functions.
* For example, driver need discard the stripped PVID tag to ensure
* the PVID will not report to mbuf and shift the inserted VLAN tag
* to avoid port based VLAN covering it.
HNS3_MP_REQ_MAX
};
-/* Pameters for IPC. */
+/* Parameters for IPC. */
struct hns3_mp_param {
enum hns3_mp_req_type type;
int port_id;
if (ret == -EPERM) {
hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
old_addr);
- hns3_warn(hw, "Has permanet mac addr(%s) for vf",
+ hns3_warn(hw, "Has permanent mac addr(%s) for vf",
mac_str);
} else {
hns3_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE,
* 1. The promiscuous/allmulticast mode can be configured successfully
* only based on the trusted VF device. If based on the non trusted
* VF device, configuring promiscuous/allmulticast mode will fail.
- * The hns3 VF device can be confiruged as trusted device by hns3 PF
+ * The hns3 VF device can be configured as trusted device by hns3 PF
* kernel ethdev driver on the host by the following command:
* "ip link set <eth num> vf <vf id> turst on"
* 2. After the promiscuous mode is configured successfully, hns3 VF PMD
* filter is still effective even in promiscuous mode. If upper
* applications don't call rte_eth_dev_vlan_filter API function to
-	 *     set vlan based on VF device, hns3 VF PMD will can't receive
+	 *     set vlan based on VF device, hns3 VF PMD can't receive
- * the packets with vlan tag in promiscuoue mode.
+ * the packets with vlan tag in promiscuous mode.
*/
hns3_cmd_setup_basic_desc(&desc, HNS3_OPC_MBX_VF_TO_PF, false);
req->msg[0] = HNS3_MBX_SET_PROMISC_MODE;
uint32_t flags;
uint32_t fd_id; /* APP marked unique value for this rule. */
uint8_t action;
- /* VF id, avaiblable when flags with HNS3_RULE_FLAG_VF_ID. */
+ /* VF id, available when flags with HNS3_RULE_FLAG_VF_ID. */
uint8_t vf_id;
/*
* equal 0 when action is drop.
*
* @param actions[in]
* @param rule[out]
- * NIC specfilc actions derived from the actions.
+ * NIC specific actions derived from the actions.
* @param error[out]
*/
static int
* Queue region is implemented by FDIR + RSS in hns3 hardware,
* the FDIR's action is one queue region (start_queue_id and
* queue_num), then RSS spread packets to the queue region by
- * RSS algorigthm.
+ * RSS algorithm.
*/
case RTE_FLOW_ACTION_TYPE_RSS:
ret = hns3_handle_action_queue_region(dev, actions,
if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
- "Ver/protocal is not supported in NVGRE");
+ "Ver/protocol is not supported in NVGRE");
/* TNI must be totally masked or not. */
if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
- "Ver/protocal is not supported in GENEVE");
+ "Ver/protocol is not supported in GENEVE");
/* VNI must be totally masked or not. */
if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
}
/*
- * This function is used to parse rss action validatation.
+ * This function is used to parse the RSS action for validation.
*/
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
/*
* Check if the flow rule is supported by hns3.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
* the HW. Because there can be no enough room for the rule.
*/
static int
mbx_time_limit = (uint32_t)hns->mbx_time_limit_ms * US_PER_MS;
while (wait_time < mbx_time_limit) {
if (__atomic_load_n(&hw->reset.disable_cmd, __ATOMIC_RELAXED)) {
- hns3_err(hw, "Don't wait for mbx respone because of "
+ hns3_err(hw, "Don't wait for mbx response because of "
"disable_cmd");
return -EBUSY;
}
if (is_reset_pending(hns)) {
hw->mbx_resp.req_msg_data = 0;
- hns3_err(hw, "Don't wait for mbx respone because of "
+ hns3_err(hw, "Don't wait for mbx response because of "
"reset pending");
return -EIO;
}
HNS3_MBX_GET_RETA, /* (VF -> PF) get RETA */
HNS3_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
HNS3_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
- HNS3_MBX_PF_VF_RESP, /* (PF -> VF) generate respone to VF */
+ HNS3_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
HNS3_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
HNS3_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
HNS3_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
struct hns3_rss_conf {
/* RSS parameters :algorithm, flow_types, key, queue */
struct rte_flow_action_rss conf;
- uint8_t hash_algo; /* hash function type definited by hardware */
+ uint8_t hash_algo; /* hash function type defined by hardware */
uint8_t key[HNS3_RSS_KEY_SIZE]; /* Hash key */
struct hns3_rss_tuple_cfg rss_tuple_sets;
uint16_t rss_indirection_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
* For hns3 VF device, whether it needs to process PVID depends
* on the configuration of PF kernel mode netdevice driver. And the
* related PF configuration is delivered through the mailbox and finally
- * reflectd in port_base_vlan_cfg.
+ * reflected in port_base_vlan_cfg.
*/
if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
rxq->pvid_sw_discard_en = hw->port_base_vlan_cfg.state ==
* For hns3 VF device, whether it needs to process PVID depends
* on the configuration of PF kernel mode netdev driver. And the
* related PF configuration is delivered through the mailbox and finally
- * reflectd in port_base_vlan_cfg.
+ * reflected in port_base_vlan_cfg.
*/
if (hns->is_vf || hw->vlan_mode == HNS3_SW_SHIFT_AND_DISCARD_MODE)
txq->pvid_sw_shift_en = hw->port_base_vlan_cfg.state ==
* in Tx direction based on hns3 network engine. So when the number of
* VLANs in the packets represented by rxm plus the number of VLAN
* offload by hardware such as PVID etc, exceeds two, the packets will
- * be discarded or the original VLAN of the packets will be overwitted
+ * be discarded or the original VLAN of the packets will be overwritten
* by hardware. When the PF PVID is enabled by calling the API function
* named rte_eth_dev_set_vlan_pvid or the VF PVID is enabled by the hns3
* PF kernel ether driver, the outer VLAN tag will always be the PVID.
/*
* The inner l2 length of mbuf is the sum of outer l4 length,
* tunneling header length and inner l2 length for a tunnel
- * packect. But in hns3 tx descriptor, the tunneling header
+ * packet. But in hns3 tx descriptor, the tunneling header
* length is contained in the field of outer L4 length.
* Therefore, driver need to calculate the outer L4 length and
* inner L2 length.
tmp_outer |= hns3_gen_field_val(HNS3_TXD_TUNTYPE_M,
HNS3_TXD_TUNTYPE_S, HNS3_TUN_NVGRE);
/*
- * For NVGRE tunnel packect, the outer L4 is empty. So only
+ * For NVGRE tunnel packet, the outer L4 is empty. So only
* fill the NVGRE header length to the outer L4 field.
*/
tmp_outer |= hns3_gen_field_val(HNS3_TXD_L4LEN_M,
* mbuf, but for hns3 descriptor, it is contained in the outer L4. So,
* there is a need that switching between them. To avoid multiple
* calculations, the length of the L2 header include the outer and
- * inner, will be filled during the parsing of tunnel packects.
+ * inner, will be filled during the parsing of tunnel packets.
*/
if (!(ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)) {
/*
if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
- * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
* header for TSO packets
*/
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
if (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM) {
struct rte_udp_hdr *udp_hdr;
/*
- * If OUTER_UDP_CKSUM is support, HW can caclulate the pseudo
+		 * If OUTER_UDP_CKSUM is supported, HW can calculate the pseudo
* header for TSO packets
*/
if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
/*
* If packet len bigger than mtu when recv with no-scattered algorithm,
- * the first n bd will without FE bit, we need process this sisution.
+ * the first n BDs will be without the FE bit, we need to process this situation.
* Note: we don't need add statistic counter because latest BD which
* with FE bit will mark HNS3_RXD_L2E_B bit.
*/
cnt = hns3_read_dev(rxq, HNS3_RING_RX_PKTNUM_RECORD_REG);
/*
- * Read hardware and software in adjacent positions to minumize
+ * Read hardware and software in adjacent positions to minimize
* the timing variance.
*/
rte_stats->ierrors += rxq->err_stats.l2_errors +
* A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if size is 0. In this case function will retrieve
- * all avalible statistics.
+ * all available statistics.
* @param values
* A pointer to a table to be filled with device statistics values.
* @param size
if (ret != I40E_SUCCESS)
PMD_DRV_LOG(WARNING, "Fail to set phy mask");
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
i40e_dev_link_update(dev, 0);
}
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name,
rte_i40e_hw_port_strings[i].name,
count++;
}
- /* Get individiual stats from i40e_hw_port struct */
+ /* Get individual stats from i40e_hw_port struct */
for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_i40e_hw_port_strings[i].offset);
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR,
- "VSI failed to get TC bandwdith configuration %u",
+ "VSI failed to get TC bandwidth configuration %u",
hw->aq.asq_last_status);
return ret;
}
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
return 0;
}
-/* Check if there exists the ehtertype filter */
+/* Check if there exists the ethertype filter */
struct i40e_ethertype_filter *
i40e_sw_ethertype_filter_lookup(struct i40e_ethertype_rule *ethertype_rule,
const struct i40e_ethertype_filter_input *input)
TAILQ_ENTRY(i40e_tunnel_filter) rules;
struct i40e_tunnel_filter_input input;
uint8_t is_to_vf; /* 0 - to PF, 1 - to VF */
- uint16_t vf_id; /* VF id, avaiblable when is_to_vf is 1. */
+ uint16_t vf_id; /* VF id, available when is_to_vf is 1. */
uint16_t queue; /* Queue assigned to when match */
};
uint32_t tenant_id; /**< Tenant ID to match. VNI, GRE key... */
uint16_t queue_id; /**< Queue assigned to if match. */
uint8_t is_to_vf; /**< 0 - to PF, 1 - to VF */
- uint16_t vf_id; /**< VF id, avaiblable when is_to_vf is 1. */
+ uint16_t vf_id; /**< VF id, available when is_to_vf is 1. */
};
TAILQ_HEAD(i40e_flow_list, rte_flow);
/*
* If message statistics from a VF exceed the maximal limitation,
* the PF will ignore any new message from that VF for
- * 'ignor_second' time.
+ * 'ignore_second' time.
*/
uint32_t ignore_second;
};
};
/**
- * Strucute to store private data for each VF representor instance
+ * Structure to store private data for each VF representor instance
*/
struct i40e_vf_representor {
uint16_t switch_domain_id;
uint16_t vf_id;
/**< Virtual Function ID */
struct i40e_adapter *adapter;
- /**< Private data store of assocaiated physical function */
+ /**< Private data store of associated physical function */
struct i40e_eth_stats stats_offset;
/**< Zero-point of VF statistics*/
};
I40E_QRX_TAIL(rxq->vsi->base_queue);
rte_wmb();
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return err;
for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++) {
if (flex_cfg->src_offset[i] >= I40E_MAX_FLX_SOURCE_OFF) {
- PMD_DRV_LOG(ERR, "exceeds maxmial payload limit.");
+ PMD_DRV_LOG(ERR, "exceeds maximal payload limit.");
return -EINVAL;
}
}
memset(flex_pit, 0, sizeof(flex_pit));
num = i40e_srcoff_to_flx_pit(flex_cfg->src_offset, flex_pit);
if (num > I40E_MAX_FLXPLD_FIED) {
- PMD_DRV_LOG(ERR, "exceeds maxmial number of flex fields.");
+ PMD_DRV_LOG(ERR, "exceeds maximal number of flex fields.");
return -EINVAL;
}
for (i = 0; i < num; i++) {
uint8_t pctype = fdir_input->pctype;
struct i40e_customized_pctype *cus_pctype;
- /* raw pcket template - just copy contents of the raw packet */
+ /* raw packet template - just copy contents of the raw packet */
if (fdir_input->flow_ext.pkt_template) {
memcpy(raw_pkt, fdir_input->flow.raw_flow.packet,
fdir_input->flow.raw_flow.length);
&check_filter.fdir.input);
if (!node) {
PMD_DRV_LOG(ERR,
- "There's no corresponding flow firector filter!");
+ "There's no corresponding flow director filter!");
return -EINVAL;
}
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Exceeds maxmial payload limit.");
+ "Exceeds maximal payload limit.");
return -rte_errno;
}
vf->request_caps = *(uint32_t *)msg;
/* enable all RSS by default,
- * doesn't support hena setting by virtchnnl yet.
+ * doesn't support hena setting by virtchnl yet.
*/
if (vf->request_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
I40E_WRITE_REG(hw, I40E_VFQF_HENA1(0, vf->vf_idx),
if ((map->rxq_map < qbit_max) && (map->txq_map < qbit_max)) {
i40e_pf_config_irq_link_list(vf, map);
} else {
- /* configured queue size excceed limit */
+		/* configured queue size exceeds limit */
ret = I40E_ERR_PARAM;
goto send_msg;
}
rxdp[i].read.pkt_addr = dma_addr;
}
- /* Update rx tail regsiter */
+ /* Update rx tail register */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register. Update the RDT with the value of the last processed RX
* descriptor minus 1, to guarantee that the RDT register is never
- * equal to the RDH register, which creates a "full" ring situtation
+ * equal to the RDH register, which creates a "full" ring situation
* from the hardware point of view.
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
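The tail-update rule described in this comment, writing back the index of the last processed descriptor minus one with wrap-around, can be sketched on its own (illustrative, not the driver's exact code):

    #include <stdint.h>

    /* Compute the RDT value from the last processed descriptor index so the
     * tail never becomes equal to the head, which would read as "full". */
    static inline uint16_t
    rdt_from_last_processed(uint16_t rx_id, uint16_t nb_desc)
    {
        return rx_id == 0 ? (uint16_t)(nb_desc - 1) : (uint16_t)(rx_id - 1);
    }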
i40e_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
- /* Determin if RS bit needs to be set */
+ /* Determine if RS bit needs to be set */
if (txq->tx_tail > txq->tx_next_rs) {
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
}
if (rxq->rx_deferred_start)
- PMD_DRV_LOG(WARNING, "RX queue %u is deferrd start",
+ PMD_DRV_LOG(WARNING, "RX queue %u is deferred start",
rx_queue_id);
err = i40e_alloc_rx_queue_mbufs(rxq);
return err;
}
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
}
if (txq->tx_deferred_start)
- PMD_DRV_LOG(WARNING, "TX queue %u is deferrd start",
+ PMD_DRV_LOG(WARNING, "TX queue %u is deferred start",
tx_queue_id);
/*
PMD_DRV_LOG(ERR, "Can't use default burst.");
return -EINVAL;
}
- /* check scatterred conflict */
+ /* check scattered conflict */
if (!dev->data->scattered_rx && use_scattered_rx) {
PMD_DRV_LOG(ERR, "Scattered rx is required.");
return -EINVAL;
rxq->rx_deferred_start = rx_conf->rx_deferred_start;
rxq->offloads = offloads;
- /* Allocate the maximun number of RX ring hardware descriptor. */
+ /* Allocate the maximum number of RX ring hardware descriptor. */
len = I40E_MAX_RING_DESC;
/**
*/
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+ /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
if (tx_conf->tx_rs_thresh > 0)
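The fallback above keeps the RS threshold and the free threshold from together exceeding the ring size; a standalone sketch with a placeholder default (the real value lives in the driver headers):

    #include <stdint.h>

    #define DFLT_TX_RS_THRESH 32u  /* placeholder, not the driver's value */

    /* Shrink the RS threshold when the default would not fit next to the
     * chosen free threshold inside an nb_desc-sized ring. */
    static uint16_t
    pick_tx_rs_thresh(uint16_t nb_desc, uint16_t tx_free_thresh)
    {
        if (DFLT_TX_RS_THRESH + tx_free_thresh > nb_desc)
            return (uint16_t)(nb_desc - tx_free_thresh);
        return DFLT_TX_RS_THRESH;
    }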
if (rxq->max_pkt_len > buf_size)
dev_data->scattered_rx = 1;
- /* Init the RX tail regieter. */
+ /* Init the RX tail register. */
I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
return 0;
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
desc_to_olflags_v(descs, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll((vec_ld(0,
(vector unsigned long *)&staterr)[0]));
nb_pkts_recd += var;
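This "calc available number of desc" step is the same across the vector Rx paths: each completed descriptor contributes one DD bit to a packed status word, and a popcount tells how many of the slots in this iteration are done. A standalone illustration:

    #include <stdint.h>

    /* Count completed descriptors from a packed DD-bit word; when the count
     * is less than the per-loop width, the receive loop stops. */
    static inline unsigned int
    count_done_descs(uint64_t packed_dd_bits)
    {
        return (unsigned int)__builtin_popcountll(packed_dd_bits);
    }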
vreinterpretq_u8_u32(l3_l4e)));
/* then we shift left 1 bit */
l3_l4e = vshlq_n_u32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = vandq_u32(l3_l4e, cksum_mask);
vlan0 = vorrq_u32(vlan0, rss);
I40E_UINT16_BIT - 1));
stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
if (unlikely(stat == 0)) {
nb_pkts_recd += RTE_I40E_DESCS_PER_LOOP;
} else {
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
/* then we shift left 1 bit */
l3_l4e = _mm_slli_epi32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
vlan0 = _mm_or_si128(vlan0, rss);
__m128i v_fdir_ol_flags = descs_to_fdir_16b(desc_fltstat,
descs, rx_pkts);
#endif
- /* OR in ol_flag bits after descriptor speicific extraction */
+ /* OR in ol_flag bits after descriptor specific extraction */
vlan0 = _mm_or_si128(vlan0, v_fdir_ol_flags);
}
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_I40E_DESCS_PER_LOOP))
/* Get all TCs' bandwidth. */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
if (veb->enabled_tc & BIT_ULL(i)) {
- /* For rubust, if bandwidth is 0, use 1 instead. */
+			/* For robustness, if bandwidth is 0, use 1 instead. */
if (veb->bw_info.bw_ets_share_credits[i])
ets_data.tc_bw_share_credits[i] =
veb->bw_info.bw_ets_share_credits[i];
j = 0;
vf->rss_lut[i] = j;
}
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure RSS */
ret = iavf_configure_rss_lut(adapter);
if (ret)
return ret;
"vector %u are mapping to all Rx queues",
vf->msix_base);
} else {
- /* If Rx interrupt is reuquired, and we can use
+ /* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
*/
vf->nb_msix =
}
rte_memcpy(vf->rss_lut, lut, reta_size);
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure RSS */
ret = iavf_configure_rss_lut(adapter);
if (ret) /* revert back */
rte_memcpy(vf->rss_lut, lut, reta_size);
* 16B - 3
*
* but we also need the IV Length for TSO to correctly calculate the total
- * header length so placing it in the upper 6-bits here for easier reterival.
+ * header length so placing it in the upper 6-bits here for easier retrieval.
*/
static inline uint8_t
calc_ipsec_desc_iv_len_field(uint16_t iv_sz)
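The packing described above keeps the encoded IV size in the low bits and the raw IV length in the upper 6 bits so it can be recovered cheaply for the TSO header-length math. The exact field layout below is assumed purely for illustration:

    #include <stdint.h>

    /* Hypothetical layout: bits 1:0 = encoded IV size, bits 7:2 = IV length
     * in bytes (IV lengths are well under 64, so 6 bits suffice). */
    static inline uint8_t
    pack_iv_field(uint8_t iv_code, uint8_t iv_len_bytes)
    {
        return (uint8_t)((iv_len_bytes << 2) | (iv_code & 0x3));
    }

    static inline uint8_t
    unpack_iv_len(uint8_t field)
    {
        return (uint8_t)(field >> 2);
    }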
/**
* Send SA add virtual channel request to Inline IPsec driver.
*
- * Inline IPsec driver expects SPI and destination IP adderss to be in host
+ * Inline IPsec driver expects SPI and destination IP address to be in host
* order, but DPDK APIs are network order, therefore we need to do a htonl
* conversion of these parameters.
*/
/**
* Send virtual channel security policy add request to IES driver.
*
- * IES driver expects SPI and destination IP adderss to be in host
+ * IES driver expects SPI and destination IP address to be in host
* order, but DPDK APIs are network order, therefore we need to do a htonl
* conversion of these parameters.
*/
request->req_id = (uint16_t)0xDEADBEEF;
/**
- * SA delete supports deletetion of 1-8 specified SA's or if the flag
+ * SA delete supports deletion of 1-8 specified SA's or if the flag
* field is zero, all SA's associated with VF will be deleted.
*/
if (sess) {
md = RTE_MBUF_DYNFIELD(m, iavf_sctx->pkt_md_offset,
struct iavf_ipsec_crypto_pkt_metadata *);
- /* Set immutatable metadata values from session template */
+ /* Set immutable metadata values from session template */
memcpy(md, &iavf_sess->pkt_metadata_template,
sizeof(struct iavf_ipsec_crypto_pkt_metadata));
capabilities[number_of_capabilities].op = RTE_CRYPTO_OP_TYPE_UNDEFINED;
/**
- * Iterate over each virtchl crypto capability by crypto type and
+ * Iterate over each virtchnl crypto capability by crypto type and
* algorithm.
*/
for (i = 0; i < VIRTCHNL_IPSEC_MAX_CRYPTO_CAP_NUM; i++) {
/**
* Update the security capabilities struct with the runtime discovered
* crypto capabilities, except for last element of the array which is
- * the null terminatation
+ * the null termination
*/
for (i = 0; i < ((sizeof(iavf_security_capabilities) /
sizeof(iavf_security_capabilities[0])) - 1); i++) {
};
-/* IPsec Crypto Packet Metaday offload flags */
+/* IPsec Crypto Packet Metadata offload flags */
#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IS_TUN (0x1 << 0)
#define IAVF_IPSEC_CRYPTO_OL_FLAGS_ESN (0x1 << 1)
#define IAVF_IPSEC_CRYPTO_OL_FLAGS_IPV6_EXT_HDRS (0x1 << 2)
return -ENOMEM;
}
- /* Allocate the maximun number of RX ring hardware descriptor with
- * a liitle more to support bulk allocate.
+ /* Allocate the maximum number of RX ring hardware descriptor with
+ * a little more to support bulk allocate.
*/
len = IAVF_MAX_RING_DESC + IAVF_RX_MAX_BURST;
ring_size = RTE_ALIGN(len * sizeof(union iavf_rx_desc),
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
/* then we shift left 1 bit */
l3_l4e = _mm_slli_epi32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
vlan0 = _mm_or_si128(vlan0, rss);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
(vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
- " than (%u.%u) to support Adapative VF",
+ " than (%u.%u) to support Adaptive VF",
VIRTCHNL_VERSION_MAJOR_START,
VIRTCHNL_VERSION_MAJOR_START);
return -1;
err = iavf_execute_vf_cmd(adapter, &args, 0);
if (err) {
- PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
+ PMD_DRV_LOG(ERR, "fail to check flow director rule");
return err;
}
j = 0;
hw->rss_lut[i] = j;
}
- /* send virtchnnl ops to configure rss*/
+ /* send virtchnl ops to configure RSS */
ret = ice_dcf_configure_rss_lut(hw);
if (ret)
return ret;
"vector %u are mapping to all Rx queues",
hw->msix_base);
} else {
- /* If Rx interrupt is reuquired, and we can use
+ /* If Rx interrupt is required, and we can use
* multi interrupts, then the vec is from 1
*/
hw->nb_msix = RTE_MIN(hw->vf_res->max_vectors,
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
}
/* At the beginning, only TC0. */
- /* What we need here is the maximam number of the TX queues.
+ /* What we need here is the maximum number of the TX queues.
* Currently vsi->nb_qps means it.
* Correct it if any change.
*/
goto rx_err;
}
- /* enable Rx interrput and mapping Rx queue to interrupt vector */
+	/* enable Rx interrupt and map Rx queue to interrupt vector */
if (ice_rxq_intr_setup(dev))
return -EIO;
ice_dev_set_link_up(dev);
- /* Call get_link_info aq commond to enable/disable LSE */
+ /* Call get_link_info aq command to enable/disable LSE */
ice_link_update(dev, 0);
pf->adapter_stopped = false;
count++;
}
- /* Get individiual stats from ice_hw_port struct */
+ /* Get individual stats from ice_hw_port struct */
for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
xstats[count].value =
*(uint64_t *)((char *)hw_stats +
count++;
}
- /* Get individiual stats from ice_hw_port struct */
+ /* Get individual stats from ice_hw_port struct */
for (i = 0; i < ICE_NB_HW_PORT_XSTATS; i++) {
strlcpy(xstats_names[count].name, ice_hw_port_strings[i].name,
sizeof(xstats_names[count].name));
rxq->proto_xtr = pf->proto_xtr != NULL ?
pf->proto_xtr[queue_idx] : PROTO_XTR_NONE;
- /* Allocate the maximun number of RX ring hardware descriptor. */
+ /* Allocate the maximum number of RX ring hardware descriptor. */
len = ICE_MAX_RING_DESC;
/**
tx_free_thresh = (uint16_t)(tx_conf->tx_free_thresh ?
tx_conf->tx_free_thresh :
ICE_DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+ /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
tx_rs_thresh =
(ICE_DEFAULT_TX_RSBIT_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : ICE_DEFAULT_TX_RSBIT_THRESH;
rxdp[i].read.pkt_addr = dma_addr;
}
- /* Update rx tail regsiter */
+ /* Update Rx tail register */
ICE_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_free_trigger);
rxq->rx_free_trigger =
* threshold of the queue, advance the Receive Descriptor Tail (RDT)
* register. Update the RDT with the value of the last processed RX
* descriptor minus 1, to guarantee that the RDT register is never
- * equal to the RDH register, which creates a "full" ring situtation
+ * equal to the RDH register, which creates a "full" ring situation
* from the hardware point of view.
*/
nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
ice_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
- /* Determin if RS bit needs to be set */
+ /* Determine if RS bit needs to be set */
if (txq->tx_tail > txq->tx_next_rs) {
txr[txq->tx_next_rs].cmd_type_offset_bsz |=
rte_cpu_to_le_64(((uint64_t)ICE_TX_DESC_CMD_RS) <<
__m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6);
__m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask);
flags = _mm_or_si128(l3_l4_flags, l4_outer_flags);
- /* we need to mask out the reduntant bits introduced by RSS or
+ /* we need to mask out the redundant bits introduced by RSS or
* VLAN fields.
*/
flags = _mm_and_si128(flags, cksum_mask);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb0);
ice_rx_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != ICE_DESCS_PER_LOOP))
/* search the filter array */
for (; i < IGC_MAX_NTUPLE_FILTERS; i++) {
if (igc->ntuple_filters[i].hash_val) {
- /* compare the hase value */
+ /* compare the hash value */
if (ntuple->hash_val ==
igc->ntuple_filters[i].hash_val)
/* filter be found, return index */
sw_ring[tx_id].mbuf = NULL;
sw_ring[tx_id].last_id = tx_id;
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
} while (tx_id != tx_next);
* Walk the list and find the next mbuf, if any.
*/
do {
- /* Move to next segemnt. */
+ /* Move to next segment. */
tx_id = sw_ring[tx_id].next_id;
if (sw_ring[tx_id].mbuf)
* enum ionic_fw_control_oper - FW control operations
* @IONIC_FW_RESET: Reset firmware
* @IONIC_FW_INSTALL: Install firmware
- * @IONIC_FW_ACTIVATE: Acticate firmware
+ * @IONIC_FW_ACTIVATE: Activate firmware
*/
enum ionic_fw_control_oper {
IONIC_FW_RESET = 0,
};
/**
- * struct ionic_fw_control_comp - Firmware control copletion
+ * struct ionic_fw_control_comp - Firmware control completion
* @status: Status of the command (enum ionic_status_code)
* @comp_index: Index in the descriptor ring for which this is the completion
* @slot: Slot where the firmware was installed
* and @identity->intr_coal_div to convert from
* usecs to device units:
*
- * coal_init = coal_usecs * coal_mutl / coal_div
+ * coal_init = coal_usecs * coal_mult / coal_div
*
* When an interrupt is sent the interrupt
* coalescing timer current value
RTE_CACHE_LINE_SIZE,
afu_dev->device.numa_node);
if (!hw) {
- IPN3KE_AFU_PMD_ERR("failed to allocate hardwart data");
+ IPN3KE_AFU_PMD_ERR("failed to allocate hardware data");
retval = -ENOMEM;
return -ENOMEM;
}
};
/**
- * Strucute to store private data for each representor instance
+ * Structure to store private data for each representor instance
*/
struct ipn3ke_rpst {
TAILQ_ENTRY(ipn3ke_rpst) next; /**< Next in device list. */
uint16_t i40e_pf_eth_port_id;
struct rte_eth_link ori_linfo;
struct ipn3ke_tm_internals tm;
- /**< Private data store of assocaiated physical function */
+ /**< Private data store of associated physical function */
struct rte_ether_addr mac_addr;
};
IPN3KE_AFU_PMD_DEBUG("IPN3KE_CLF_LKUP_ENABLE: %x\n", data);
- /* configure rx parse config, settings associatied with VxLAN */
+ /* configure rx parse config, settings associated with VxLAN */
IPN3KE_MASK_WRITE_REG(hw,
IPN3KE_CLF_RX_PARSE_CFG,
0,
count++;
}
- /* Get individiual stats from ipn3ke_rpst_hw_port */
+ /* Get individual stats from ipn3ke_rpst_hw_port */
for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
xstats[count].value = *(uint64_t *)(((char *)(&hw_stats)) +
ipn3ke_rpst_hw_port_strings[i].offset);
count++;
}
- /* Get individiual stats from ipn3ke_rpst_rxq_pri */
+ /* Get individual stats from ipn3ke_rpst_rxq_pri */
for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
xstats[count].value =
}
}
- /* Get individiual stats from ipn3ke_rpst_txq_prio */
+ /* Get individual stats from ipn3ke_rpst_txq_prio */
for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < IPN3KE_RPST_PRIO_XSTATS_CNT; prio++) {
xstats[count].value =
count++;
}
- /* Get individiual stats from ipn3ke_rpst_hw_port */
+ /* Get individual stats from ipn3ke_rpst_hw_port */
for (i = 0; i < IPN3KE_RPST_HW_PORT_XSTATS_CNT; i++) {
snprintf(xstats_names[count].name,
sizeof(xstats_names[count].name),
count++;
}
- /* Get individiual stats from ipn3ke_rpst_rxq_pri */
+ /* Get individual stats from ipn3ke_rpst_rxq_pri */
for (i = 0; i < IPN3KE_RPST_RXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < 8; prio++) {
snprintf(xstats_names[count].name,
}
}
- /* Get individiual stats from ipn3ke_rpst_txq_prio */
+ /* Get individual stats from ipn3ke_rpst_txq_prio */
for (i = 0; i < IPN3KE_RPST_TXQ_PRIO_XSTATS_CNT; i++) {
for (prio = 0; prio < 8; prio++) {
snprintf(xstats_names[count].name,
endif
#
-# Add the experimenatal APIs called from this PMD
+# Add the experimental APIs called from this PMD
# rte_eth_switch_domain_alloc()
# rte_eth_dev_create()
# rte_eth_dev_destroy()
#define BYPASS_STATUS_OFF_MASK 3
-/* Macros to check for invlaid function pointers. */
+/* Macros to check for invalid function pointers. */
#define FUNC_PTR_OR_ERR_RET(func, retval) do { \
if ((func) == NULL) { \
PMD_DRV_LOG(ERR, "%s:%d function not supported", \
* ixgbe_bypass_valid_rd_generic - Verify valid return from bit-bang.
*
* If we send a write we can't be sure it took until we can read back
- * that same register. It can be a problem as some of the feilds may
+ * that same register. It can be a problem as some of the fields may
* for valid reasons change between the time wrote the register and
* we read it again to verify. So this function check everything we
* can check and then assumes it worked.
}
/**
- * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Regiter.
+ * ixgbe_bypass_set_generic - Set a bypass field in the FW CTRL Register.
*
* @hw: pointer to hardware structure
* @cmd: The control word we are setting.
if (dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
dev->data->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_RSS_HASH;
- /* multipe queue mode checking */
+ /* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
if (ret != 0) {
PMD_DRV_LOG(ERR, "ixgbe_check_mq_mode fails with %d.",
}
}
- /* confiugre msix for sleep until rx interrupt */
+ /* configure MSI-X for sleep until Rx interrupt */
ixgbe_configure_msix(dev);
/* initialize transmission unit */
if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- /* Not suported in bypass mode */
+ /* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link up is not supported "
"by device id 0x%x", hw->device_id);
return -ENOTSUP;
if (hw->mac.type == ixgbe_mac_82599EB) {
#ifdef RTE_LIBRTE_IXGBE_BYPASS
if (hw->device_id == IXGBE_DEV_ID_82599_BYPASS) {
- /* Not suported in bypass mode */
+ /* Not supported in bypass mode */
PMD_INIT_LOG(ERR, "Set link down is not supported "
"by device id 0x%x", hw->device_id);
return -ENOTSUP;
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
* @param handle
* Pointer to interrupt handle.
* @param param
- * The address of parameter (struct rte_eth_dev *) regsitered before.
+ * The address of parameter (struct rte_eth_dev *) registered before.
*
* @return
* void
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
- * as IXGBE_VF_MAXMSIVECOTR = 1
+ * as IXGBE_VF_MAXMSIVECTOR = 1
*/
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
rte_intr_vec_list_index_set(intr_handle, q_idx,
* @param
* dev: Pointer to struct rte_eth_dev.
* index: the index the filter allocates.
- * filter: ponter to the filter that will be added.
+ * filter: pointer to the filter that will be added.
* rx_queue: the queue id the filter assigned to.
*
* @return
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
return 0;
#define IXGBE_LPBK_NONE 0x0 /* Default value. Loopback is disabled. */
#define IXGBE_LPBK_TX_RX 0x1 /* Tx->Rx loopback operation is enabled. */
/* X540-X550 specific loopback operations */
-#define IXGBE_MII_AUTONEG_ENABLE 0x1000 /* Auto-negociation enable (default = 1) */
+#define IXGBE_MII_AUTONEG_ENABLE 0x1000 /* Auto-negotiation enable (default = 1) */
#define IXGBE_MAX_JUMBO_FRAME_SIZE 0x2600 /* Maximum Jumbo frame size. */
switch (info->mask.tunnel_type_mask) {
case 0:
- /* Mask turnnel type */
+ /* Mask tunnel type */
fdiripv6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE;
break;
case 1:
}
/**
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
/**
* Check if the flow rule is supported by ixgbe.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It doesn't guarantee the rule can be programmed into
* the HW. Because there can be no enough room for the rule.
*/
static int
return -1;
}
- /* Disable and clear Rx SPI and key table table entryes*/
+	/* Disable and clear Rx SPI and key table entries */
reg_val = IPSRXIDX_WRITE | IPSRXIDX_TABLE_SPI | (sa_index << 3);
IXGBE_WRITE_REG(hw, IXGBE_IPSRXSPI, 0);
IXGBE_WRITE_REG(hw, IXGBE_IPSRXIPIDX, 0);
/* PFDMA Tx General Switch Control Enables VMDQ loopback */
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);
/* clear VMDq map to scan rar 127 */
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
* register.
* Update the RDT with the value of the last processed RX descriptor
* minus 1, to guarantee that the RDT register is never equal to the
- * RDH register, which creates a "full" ring situtation from the
+ * RDH register, which creates a "full" ring situation from the
* hardware point of view...
*/
if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
*/
tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
- /* force tx_rs_thresh to adapt an aggresive tx_free_thresh */
+ /* force tx_rs_thresh to adapt an aggressive tx_free_thresh */
tx_rs_thresh = (DEFAULT_TX_RS_THRESH + tx_free_thresh > nb_desc) ?
nb_desc - tx_free_thresh : DEFAULT_TX_RS_THRESH;
if (tx_conf->tx_rs_thresh > 0)
dev->data->port_id);
dev->rx_pkt_burst = ixgbe_recv_pkts_lro_bulk_alloc;
} else {
- PMD_INIT_LOG(DEBUG, "Using Regualr (non-vector, "
+ PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
"single allocation) "
"Scattered Rx callback "
"(port=%d).",
/*
* Setup the Checksum Register.
* Disable Full-Packet Checksum which is mutually exclusive with RSS.
- * Enable IP/L4 checkum computation by hardware if requested to do so.
+ * Enable IP/L4 checksum computation by hardware if requested to do so.
*/
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
rxcsum |= IXGBE_RXCSUM_PCSD;
desc_to_ptype_v(descs, rxq->pkt_type_mask, &rx_pkts[pos]);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != RTE_IXGBE_DESCS_PER_LOOP))
break;
case MEMIF_MSG_TYPE_INIT:
/*
- * This cc does not have an interface asociated with it.
+ * This cc does not have an interface associated with it.
* If suitable interface is found it will be assigned here.
*/
ret = memif_msg_receive_init(cc, &msg);
if (ret < 0)
return ret;
} else {
- /* create one memory region contaning rings and buffers */
+ /* create one memory region containing rings and buffers */
ret = memif_region_init_shm(dev, /* has buffers */ 1);
if (ret < 0)
return ret;
MLX4_MP_REQ_STOP_RXTX,
};
-/* Pameters for IPC. */
+/* Parameters for IPC. */
struct mlx4_mp_param {
enum mlx4_mp_req_type type;
int port_id;
* Pointer to Ethernet device structure.
*
* @return
- * alwasy 0 on success
+ * always 0 on success
*/
int
mlx4_stats_reset(struct rte_eth_dev *dev)
* Pointer to RQ channel object, which includes the channel fd
*
* @param[out] fd
- * The file descriptor (representing the intetrrupt) used in this channel.
+ * The file descriptor (representing the interrupt) used in this channel.
*
* @return
* 0 on successfully setting the fd to non-blocking, non-zero otherwise.
priv->drop_queue.hrxq = mlx5_drop_action_create(eth_dev);
if (!priv->drop_queue.hrxq)
goto error;
- /* Port representor shares the same max prioirity with pf port. */
+ /* Port representor shares the same max priority with pf port. */
if (!priv->sh->flow_priority_check_flag) {
/* Supported Verbs flow priority number detection. */
err = mlx5_flow_discover_priorities(eth_dev);
/*
* Force standalone bonding
* device for ROCE LAG
- * confgiurations.
+ * configurations.
*/
list[ns].info.master = 0;
list[ns].info.representor = 0;
}
if (ret) {
DRV_LOG(ERR, "Probe of PCI device " PCI_PRI_FMT " "
- "aborted due to proding failure of PF %u",
+ "aborted due to prodding failure of PF %u",
pci_dev->addr.domain, pci_dev->addr.bus,
pci_dev->addr.devid, pci_dev->addr.function,
eth_da.ports[p]);
/*
* Free the shared context in last turn, because the cleanup
* routines above may use some shared fields, like
- * mlx5_os_mac_addr_flush() uses ibdev_path for retrieveing
+ * mlx5_os_mac_addr_flush() uses ibdev_path for retrieving
* ifindex if Netlink fails.
*/
mlx5_free_shared_dev_ctx(priv->sh);
if (tmp != MLX5_RCM_NONE &&
tmp != MLX5_RCM_LIGHT &&
tmp != MLX5_RCM_AGGR) {
- DRV_LOG(ERR, "Unrecognize %s: \"%s\"", key, val);
+ DRV_LOG(ERR, "Unrecognized %s: \"%s\"", key, val);
rte_errno = EINVAL;
return -rte_errno;
}
break;
}
if (sh->dv_mark_mask && sh->dv_mark_mask != mark)
- DRV_LOG(WARNING, "metadata MARK mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata MARK mask mismatch %08X:%08X",
sh->dv_mark_mask, mark);
else
sh->dv_mark_mask = mark;
if (sh->dv_meta_mask && sh->dv_meta_mask != meta)
- DRV_LOG(WARNING, "metadata META mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata META mask mismatch %08X:%08X",
sh->dv_meta_mask, meta);
else
sh->dv_meta_mask = meta;
if (sh->dv_regc0_mask && sh->dv_regc0_mask != reg_c0)
- DRV_LOG(WARNING, "metadata reg_c0 mask mismatche %08X:%08X",
+ DRV_LOG(WARNING, "metadata reg_c0 mask mismatch %08X:%08X",
sh->dv_meta_mask, reg_c0);
else
sh->dv_regc0_mask = reg_c0;
uint32_t base_index;
/**< The next index that can be used without any free elements. */
uint32_t *curr; /**< Pointer to the index to pop. */
- uint32_t *last; /**< Pointer to the last element in the empty arrray. */
+ uint32_t *last; /**< Pointer to the last element in the empty array. */
uint32_t max_id; /**< Maximum id can be allocated from the pool. */
};
void *pp; /* Packet pacing context. */
uint16_t pp_id; /* Packet pacing context index. */
uint16_t ts_n; /* Number of captured timestamps. */
- uint16_t ts_p; /* Pointer to statisticks timestamp. */
+ uint16_t ts_p; /* Pointer to statistics timestamp. */
struct mlx5_txpp_ts *tsa; /* Timestamps sliding window stats. */
struct mlx5_txpp_ts ts; /* Cached completion id/timestamp. */
uint32_t sync_lost:1; /* ci/timestamp synchronization lost. */
uint32_t sample_ids[MLX5_GRAPH_NODE_SAMPLE_NUM];
};
-/* Pattern field dscriptor - how to translate flex pattern into samples. */
+/* Pattern field descriptor - how to translate flex pattern into samples. */
__extension__
struct mlx5_flex_pattern_field {
uint16_t width:6;
/* Shared DV/DR flow data section. */
uint32_t dv_meta_mask; /* flow META metadata supported mask. */
uint32_t dv_mark_mask; /* flow MARK metadata supported mask. */
- uint32_t dv_regc0_mask; /* available bits of metatada reg_c[0]. */
+ uint32_t dv_regc0_mask; /* available bits of metadata reg_c[0]. */
void *fdb_domain; /* FDB Direct Rules name space handle. */
void *rx_domain; /* RX Direct Rules name space handle. */
void *tx_domain; /* TX Direct Rules name space handle. */
}
/**
- * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the devive
+ * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
* flow.
*
* @param[in] dev
if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Geneve TLV opt length exceeeds the limit (31)");
+ "Geneve TLV opt length exceeds the limit (31)");
/* Check if class type and length masks are full. */
if (full_mask.option_class != mask->option_class ||
full_mask.option_type != mask->option_type ||
* subflow.
*
* @param[in] dev_flow
- * Pointer the created preifx subflow.
+ *   Pointer to the created prefix subflow.
*
* @return
* The layers get from prefix subflow.
[3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
};
- /* Fill the register fileds in the flow. */
+ /* Fill the register fields in the flow. */
ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
if (ret < 0)
return NULL;
/*
* The copy Flows are not included in any list. There
* ones are referenced from other Flows and can not
- * be applied, removed, deleted in ardbitrary order
+ * be applied, removed, deleted in arbitrary order
* by list traversing.
*/
mcp_res->rix_flow = flow_list_create(dev, MLX5_FLOW_TYPE_MCP,
/*
* If dev_flow is as one of the suffix flow, some actions in suffix
* flow may need some user defined item layer flags, and pass the
- * Metadate rxq mark flag to suffix flow as well.
+ * Metadata rxq mark flag to suffix flow as well.
*/
if (flow_split_info->prefix_layers)
dev_flow->handle->layers = flow_split_info->prefix_layers;
* @param[out] error
* Perform verbose error reporting if not NULL.
* @param[in] encap_idx
- * The encap action inndex.
+ * The encap action index.
*
* @return
* 0 on success, negative value otherwise
* @param type
* Flow type to be flushed.
* @param active
- * If flushing is called avtively.
+ * If flushing is called actively.
*/
void
mlx5_flow_list_flush(struct rte_eth_dev *dev, enum mlx5_flow_type type,
* Perform verbose error reporting if not NULL. PMDs initialize this
* structure in case of error only.
* @return
- * 0 on success, a nagative value otherwise.
+ * 0 on success, a negative value otherwise.
*/
int
mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
}
/**
- * tunnel offload functionalilty is defined for DV environment only
+ * tunnel offload functionality is defined for DV environment only
*/
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
__extension__
const struct mlx5_flow_tunnel *tunnel;
uint32_t group_id;
uint32_t external:1;
- uint32_t tunnel_offload:1; /* Tunnel offlod table or not. */
+ uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
uint32_t is_egress:1; /**< Egress table. */
uint32_t is_transfer:1; /**< Transfer table. */
uint32_t dummy:1; /**< DR table. */
/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
void *drv_flow; /**< pointer to driver flow object. */
uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
- uint32_t is_meter_flow_id:1; /**< Indate if flow_id is for meter. */
- uint32_t mark:1; /**< Metadate rxq mark flag. */
+ uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
+ uint32_t mark:1; /**< Metadata rxq mark flag. */
uint32_t fate_action:3; /**< Fate action type. */
uint32_t flex_item; /**< referenced Flex Item bitmask. */
union {
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg == REG_B)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM, item,
if (reg == REG_NON)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, action,
- "unavalable extended metadata register");
+ "unavailable extended metadata register");
if (reg != REG_A && reg != REG_B) {
struct mlx5_priv *priv = dev->data->dev_private;
* Pointer to error structure.
*
* @return
- * 0 on success, a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
mlx5_flow_validate_action_meter(struct rte_eth_dev *dev,
* - Explicit decap action is prohibited by the tunnel offload API.
* - Drop action in tunnel steer rule is prohibited by the API.
* - Application cannot use MARK action because it's value can mask
- * tunnel default miss nitification.
+ * tunnel default miss notification.
* - JUMP in tunnel match rule has no support in current PMD
* implementation.
* - TAG & META are reserved for future uses.
geneve_opt_v->option_type &&
geneve_opt_resource->length ==
geneve_opt_v->option_len) {
- /* We already have GENVE TLV option obj allocated. */
+ /* We already have GENEVE TLV option obj allocated. */
__atomic_fetch_add(&geneve_opt_resource->refcnt, 1,
__ATOMIC_RELAXED);
} else {
* Check flow matching criteria first, subtract misc5/4 length if flow
* doesn't own misc5/4 parameters. In some old rdma-core releases,
* misc5/4 are not supported, and matcher creation failure is expected
- * w/o subtration. If misc5 is provided, misc4 must be counted in since
+ * w/o subtraction. If misc5 is provided, misc4 must be counted in since
* misc5 is right after misc4.
*/
if (!(match_criteria & (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC5_BIT))) {
goto error;
}
}
- /* create a dest array actioin */
+ /* create a dest array action */
ret = mlx5_os_flow_dr_create_flow_action_dest_array
(domain,
resource->num_of_dest,
* @param dev
* Ethernet device to translate flex item on.
* @param[in, out] matcher
- * Flow matcher to confgiure
+ * Flow matcher to configure
* @param[in, out] key
* Flow matcher value.
* @param[in] item
if (field->offset_shift > 15 || field->offset_shift < 0)
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
- "header length field shift exceeeds limit");
+ "header length field shift exceeds limit");
node->header_length_field_shift = field->offset_shift;
node->header_length_field_offset = field->offset_base;
}
uint8_t _exp = 0;
uint64_t m, e;
- /* Special case xir == 0 ? both exp and matissa are 0. */
+ /* Special case xir == 0 ? both exp and mantissa are 0. */
if (xir == 0) {
*man = 0;
*exp = 0;
int _exp;
double _man;
- /* Special case xbs == 0 ? both exp and matissa are 0. */
+ /* Special case xbs == 0 ? both exp and mantissa are 0. */
if (xbs == 0) {
*man = 0;
*exp = 0;
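Both helpers above decompose a value into mantissa * 2^exponent form, with zero handled as man = exp = 0. A self-contained sketch of the same idea using frexp(), assuming an 8-bit mantissa field purely for illustration:

    #include <math.h>
    #include <stdint.h>

    /* Decompose rate into man * 2^exp; frexp() yields rate = m * 2^e with
     * 0.5 <= m < 1, and the mantissa is then scaled to 8 bits. */
    static void
    rate_to_man_exp(uint64_t rate, uint32_t *man, int *exp)
    {
        if (rate == 0) {                /* special case, as noted above */
            *man = 0;
            *exp = 0;
            return;
        }
        int e;
        double m = frexp((double)rate, &e);
        uint32_t mant = (uint32_t)ceil(m * 256.0);
        if (mant == 256) {              /* renormalize after rounding up */
            mant = 128;
            e++;
        }
        *man = mant;
        *exp = e - 8;                   /* compensate for the 256 scaling */
    }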
* Fill the prm meter parameter.
*
* @param[in,out] fmp
- * Pointer to meter profie to be converted.
+ * Pointer to meter profile to be converted.
* @param[out] error
* Pointer to the error structure.
*
if (ret)
return ret;
}
- /* Update succeedded modify meter parameters. */
+	/* Update the meter parameters that were successfully modified. */
if (modify_bits & MLX5_FLOW_METER_OBJ_MODIFY_FIELD_ACTIVE)
fm->active_state = !!active_state;
}
return -rte_mtr_error_set(error, -ret,
RTE_MTR_ERROR_TYPE_MTR_PARAMS,
NULL, "Failed to update meter"
- " parmeters in hardware.");
+ " parameters in hardware.");
}
old_fmp->ref_cnt--;
fmp->ref_cnt++;
* Pointer to the device structure.
*
* @param rx_queue_id
- * Rx queue identificatior.
+ *   Rx queue identifier.
*
* @param mode
- *   Pointer to the burts mode information.
+ *   Pointer to the burst mode information.
* Number of queues in the array.
*
* @return
- * 1 if all queues in indirection table match 0 othrwise.
+ *   1 if all queues in indirection table match, 0 otherwise.
*/
static int
mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
if (hrxq->standalone) {
/*
* Replacement of indirection table unsupported for
- * stanalone hrxq objects (used by shared RSS).
+ * standalone hrxq objects (used by shared RSS).
*/
rte_errno = ENOTSUP;
return -rte_errno;
uint32_t mask = rxq->flow_meta_port_mask;
uint32_t metadata;
- /* This code is subject for futher optimization. */
+ /* This code is subject for further optimization. */
metadata = rte_be_to_cpu_32
(cq[pos].flow_table_metadata) & mask;
*RTE_MBUF_DYNFIELD(pkts[pos], offs, uint32_t *) =
}
}
if (rxq->dynf_meta) {
- /* This code is subject for futher optimization. */
+ /* This code is subject for further optimization. */
int32_t offs = rxq->flow_meta_offset;
uint32_t mask = rxq->flow_meta_port_mask;
}
}
if (rxq->dynf_meta) {
- /* This code is subject for futher optimization. */
+ /* This code is subject for further optimization. */
int32_t offs = rxq->flow_meta_offset;
uint32_t mask = rxq->flow_meta_port_mask;
* Pointer to the device structure.
*
* @param tx_queue_id
- * Tx queue identificatior.
+ *   Tx queue identifier.
*
* @param mode
- *   Pointer to the burts mode information.
+ *   Pointer to the burst mode information.
/*
* For the case which data is linked with sequence increased index, the
- * array table will be more efficiect than hash table once need to serarch
+ * array table will be more efficient than a hash table when searching for
* one data entry in large numbers of entries. Since the traditional hash
* table has a fixed size, saving huge numbers of entries to the hash
* table also causes lots of hash conflicts.
/*
* set_specific_workspace when current value is NULL
* can happen only once per thread, mark this thread in
- * linked list to be able to release reasorces later on.
+ * linked list to be able to release resources later on.
*/
err = mlx5_add_workspace_to_list(data);
if (err) {
* Pointer to RQ channel object, which includes the channel fd
*
* @param[out] fd
- * The file descriptor (representing the intetrrupt) used in this channel.
+ * The file descriptor (representing the interrupt) used in this channel.
*
* @return
* 0 on successfully setting the fd to non-blocking, non-zero otherwise.
(mru + MRVL_NETA_PKT_OFFS > mbuf_data_size)) {
mru = mbuf_data_size - MRVL_NETA_PKT_OFFS;
mtu = MRVL_NETA_MRU_TO_MTU(mru);
- MVNETA_LOG(WARNING, "MTU too big, max MTU possible limitted by"
+ MVNETA_LOG(WARNING, "MTU too big, max MTU possible limited by"
" current mbuf size: %u. Set MTU to %u, MRU to %u",
mbuf_data_size, mtu, mru);
}
if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
mtu = MRVL_PP2_MRU_TO_MTU(mru);
- MRVL_LOG(WARNING, "MTU too big, max MTU possible limitted "
+ MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
"by current mbuf size: %u. Set MTU to %u, MRU to %u",
mbuf_data_size, mtu, mru);
}
}
/**
- * Parse Traffic Class'es mapping configuration.
+ * Parse Traffic Classes mapping configuration.
*
* @param file Config file handle.
* @param port Which port to look for.
/* MRVL_TOK_START_HDR replaces MRVL_TOK_DSA_MODE parameter.
* MRVL_TOK_DSA_MODE will be supported for backward
- * compatibillity.
+ * compatibility.
*/
entry = rte_cfgfile_get_entry(file, sec_name,
MRVL_TOK_START_HDR);
hv->rxbuf_section_cnt = resp.nvs_sect[0].slotcnt;
/*
- * Pimary queue's rxbuf_info is not allocated at creation time.
+ * Primary queue's rxbuf_info is not allocated at creation time.
* Now we can allocate it after we figure out the slotcnt.
*/
hv->primary->rxbuf_info = rte_calloc("HN_RXBUF_INFO",
rte_iova_t iova;
/*
- * Build an external mbuf that points to recveive area.
+ * Build an external mbuf that points to receive area.
* Use refcount to handle multiple packets in same
* receive buffer section.
*/
* returns:
* - -EINVAL - offset outside of ring
* - RTE_ETH_RX_DESC_AVAIL - no data available yet
- * - RTE_ETH_RX_DESC_DONE - data is waiting in stagin ring
+ * - RTE_ETH_RX_DESC_DONE - data is waiting in staging ring
*/
int hn_dev_rx_queue_status(void *arg, uint16_t offset)
{
struct rte_device *dev = rte_eth_devices[port_id].device;
int ret;
- /* Tell VSP to switch data path to synthentic */
+ /* Tell VSP to switch data path to synthetic */
hn_vf_remove(hv);
PMD_DRV_LOG(NOTICE, "Start to remove port %d", port_id);
* Wildcard indicating a CPP read or write action
*
* The action used will be either read or write depending on whether a read or
- * write instruction/call is performed on the NFP_CPP_ID. It is recomended that
+ * write instruction/call is performed on the NFP_CPP_ID. It is recommended that
* the RW action is used even if all actions to be performed on a NFP_CPP_ID are
* known to be only reads or writes. Doing so will in many cases save NFP CPP
* internal software resources.
* @param chip_family Chip family ID
* @param s A string of format "iX.anything" or "iX"
* @param endptr If non-NULL, *endptr will point to the trailing
- * striong after the ME ID part of the string, which
+ * string after the ME ID part of the string, which
* is either an empty string or the first character
* after the separating period.
* @return The island ID on success, -1 on error.
* @param chip_family Chip family ID
* @param s A string of format "meX.anything" or "meX"
* @param endptr If non-NULL, *endptr will point to the trailing
- * striong after the ME ID part of the string, which
+ * string after the ME ID part of the string, which
* is either an empty string or the first character
* after the separating period.
* @return The ME number on success, -1 on error.
* @address: start address on CPP target
* @size: size of area
*
- * Allocate and initilizae a CPP area structure, and lock it down so
+ * Allocate and initialize a CPP area structure, and lock it down so
* that it can be accessed directly.
*
* NOTE: @address and @size must be 32-bit aligned values.
* @br_primary: branch id of primary bootloader
* @br_secondary: branch id of secondary bootloader
* @br_nsp: branch id of NSP
- * @primary: version of primarary bootloader
+ * @primary: version of primary bootloader
* @secondary: version id of secondary bootloader
* @nsp: version id of NSP
* @sensor_mask: mask of present sensors available on NIC
* nfp_resource_release() - Release a NFP Resource handle
* @res: NFP Resource handle
*
- * NOTE: This function implictly unlocks the resource handle
+ * NOTE: This function implicitly unlocks the resource handle
*/
void
nfp_resource_release(struct nfp_resource *res)
* nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol
* @rtbl: NFP RTsym table
* @name: Symbol name
- * @error: Poniter to error code (optional)
+ * @error: Pointer to error code (optional)
*
* Lookup a symbol, map, read it and return its value. Value of the symbol
* will be interpreted as a simple little-endian unsigned value. Symbol can
}
}
- /* confiugre MSI-X for sleep until Rx interrupt */
+ /* configure MSI-X for sleep until Rx interrupt */
ngbe_configure_msix(dev);
/* initialize transmission unit */
wr32(hw, NGBE_IVARMISC, tmp);
} else {
/* rx or tx causes */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, NGBE_IVAR(queue >> 1));
tmp &= ~(0xFF << idx);
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
wr32(hw, NGBE_ETFLT(NGBE_ETF_ID_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
wr32(hw, NGBE_TSTIMEINC, 0);
return 0;
wr32(hw, NGBE_PSRCTL, NGBE_PSRCTL_LBENA);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
/* clear VMDq map to scan rar 31 */
/* Verify queue index */
if (qidx >= dev->data->nb_rx_queues) {
- octeontx_log_err("QID %d not supporteded (0 - %d available)\n",
+ octeontx_log_err("QID %d not supported (0 - %d available)\n",
qidx, (dev->data->nb_rx_queues - 1));
return -ENOTSUP;
}
"rc=%d", rc);
return rc;
}
- /* VFIO vector zero is resereved for misc interrupt so
+ /* VFIO vector zero is reserved for misc interrupt so
* doing required adjustment. (b13bfab4cd)
*/
if (rte_intr_vec_list_index_set(handle, q,
/* This API returns the raw PTP HI clock value. Since LFs don't
* have direct access to PTP registers and it requires mbox msg
* to AF for this value. In fastpath reading this value for every
- * packet (which involes mbox call) becomes very expensive, hence
+ * packet (which involves mbox call) becomes very expensive, hence
* we should be able to derive PTP HI clock value from tsc by
* using freq_mult and clk_delta calculated during configure stage.
*/
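The comment above derives the PTP HI clock from the TSC using a precomputed frequency multiplier and clock delta. A purely hypothetical sketch of such a derivation, assuming a Q32 fixed-point multiplier (the driver's actual fixed-point format and field names are not shown here):

#include <stdint.h>

static inline uint64_t
ptp_hi_from_tsc(uint64_t tsc, uint64_t freq_mult, int64_t clk_delta)
{
	/* freq_mult assumed to be a Q32 fixed-point ratio of PTP to TSC rate. */
	return (uint64_t)(((__uint128_t)tsc * freq_mult) >> 32) + clk_delta;
}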
/* Retrieving the default desc values */
cmd[off] = send_mem_desc[6];
- /* Using compiler barier to avoid voilation of C
+ /* Using compiler barrier to avoid violation of C
* aliasing rules.
*/
rte_compiler_barrier();
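For reference, on GCC/Clang a compiler barrier such as rte_compiler_barrier() is typically just an empty asm statement with a memory clobber; it stops the compiler from reordering or caching memory accesses across that point without emitting any CPU fence instruction:

/* Compiler-only barrier: no CPU instruction is emitted. */
#define compiler_barrier() asm volatile ("" : : : "memory")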
/* Packets for which RTE_MBUF_F_TX_IEEE1588_TMST is not set, tx tstamp
* should not be recorded, hence changing the alg type to
* NIX_SENDMEMALG_SET and also changing send mem addr field to
- * next 8 bytes as it corrpt the actual tx tstamp registered
+ * next 8 bytes as it corrupts the actual tx tstamp registered
* address.
*/
send_mem->alg = NIX_SENDMEMALG_SETTSTMP - (is_ol_tstamp);
struct vlan_entry *entry;
int rc;
- /* VLAN filters can't be set without setting filtern on */
+ /* VLAN filters can't be set without setting filters on */
rc = nix_vlan_handle_default_rx_entry(eth_dev, false, true, true);
if (rc) {
otx2_err("Failed to reinstall vlan filters");
iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
SDP_VF_R_IN_CNTS(iq_no);
- otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p",
+ otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
do {
iq->inst_cnt_reg = (uint8_t *)otx_ep->hw_addr +
OTX_EP_R_IN_CNTS(iq_no);
- otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
+ otx_ep_dbg("InstQ[%d]:dbell reg @ 0x%p inst_cnt_reg @ 0x%p\n",
iq_no, iq->doorbell_reg, iq->inst_cnt_reg);
do {
if (eth_dev == NULL)
return -ENOMEM;
- /* Extract pltform data */
+ /* Extract platform data */
pfe_info = (struct ls1012a_pfe_platform_data *)&pfe->platform_data;
if (!pfe_info) {
PFE_PMD_ERR("pfe missing additional platform data");
{
u32 val = readl(base + EMAC_RCNTRL_REG);
- /*Remove loopbank*/
+ /* Remove loopback */
val &= ~EMAC_RCNTRL_LOOP;
/*Enable flow control and MII mode*/
* results, eth id, queue id from PFE block along with data.
* so we have to provide additional memory for each packet to
* HIF rx rings so that PFE block can write its headers.
- * so, we are giving the data pointor to HIF rings whose
+ * so, we are giving the data pointer to HIF rings whose
* calculation is as below:
- * mbuf->data_pointor - Required_header_size
+ * mbuf->data_pointer - Required_header_size
*
* We are utilizing the HEADROOM area to receive the PFE
* block headers. On packet reception, HIF driver will use
#define HIF_CLIENT_QUEUES_MAX 16
#define HIF_RX_PKT_MIN_SIZE RTE_CACHE_LINE_SIZE
/*
- * HIF_TX_DESC_NT value should be always greter than 4,
+ * HIF_TX_DESC_NT value should be always greater than 4,
* Otherwise HIF_TX_POLL_MARK will become zero.
*/
#define HIF_RX_DESC_NT 64
* This function should be called before initializing HIF driver.
*
* @param[in] hif_shm Shared memory address location in DDR
- * @rerurn 0 - on succes, <0 on fail to initialize
+ * @return 0 - on success, <0 on failure to initialize
*/
int
pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
for (ii = 0; ii < client->rx_q[qno].size; ii++) {
buf = (void *)desc->data;
if (buf) {
- /* Data pointor to mbuf pointor calculation:
+ /* Data pointer to mbuf pointer calculation:
* "Data - User private data - headroom - mbufsize"
- * Actual data pointor given to HIF BDs was
+ * Actual data pointer given to HIF BDs was
* "mbuf->data_offset - PFE_PKT_HEADER_SZ"
*/
buf = buf + PFE_PKT_HEADER_SZ
client_id, unsigned int qno,
u32 client_ctrl)
{
- /* Optimize the write since the destinaton may be non-cacheable */
+ /* Optimize the write since the destination may be non-cacheable */
if (!((unsigned long)pkt_hdr & 0x3)) {
((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
client_id;
/* Reads a param from the specified buffer. Returns the number of dwords read.
* If the returned str_param is NULL, the param is numeric and its value is
* returned in num_param.
- * Otheriwise, the param is a string and its pointer is returned in str_param.
+ * Otherwise, the param is a string and its pointer is returned in str_param.
*/
static u32 qed_read_param(u32 *dump_buf,
const char **param_name,
text_buf[i] = '\n';
- /* Free the old dump_buf and point the dump_buf to the newly allocagted
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
OSAL_VFREE(p_hwfn, feature->dump_buf);
if (fp->rxq != NULL) {
bufsz = (uint16_t)rte_pktmbuf_data_room_size(
fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
- /* cache align the mbuf size to simplfy rx_buf_size
+ /* cache align the mbuf size to simplify rx_buf_size
* calculation
*/
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
* (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
* 3) In regular mode - minimum rx_buf_size should be
* (MTU + Maximum L2 Header Size + 2)
- * In above cases +2 corrosponds to 2 bytes padding in front of L2
+ * In above cases +2 corresponds to 2 bytes padding in front of L2
* header.
* 4) rx_buf_size should be cacheline-size aligned. So considering
* criteria 1, we need to adjust the size to floor instead of ceil,
if (dev->data->scattered_rx) {
/* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT number of
- * bufferes can be used for single packet. So need to make sure
+ * buffers can be used for single packet. So need to make sure
* mbuf size is sufficient enough for this.
*/
if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
- /* cache align the mbuf size to simplfy rx_buf_size calculation */
+ /* cache align the mbuf size to simplify rx_buf_size calculation */
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
(max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
}
}
- /* Request number of bufferes to be allocated in next loop */
+ /* Request number of buffers to be allocated in next loop */
rxq->rx_alloc_count = rx_alloc_count;
rxq->rcv_pkts += rx_pkt;
}
}
- /* Request number of bufferes to be allocated in next loop */
+ /* Request number of buffers to be allocated in next loop */
rxq->rx_alloc_count = rx_alloc_count;
rxq->rcv_pkts += rx_pkt;
/* Inner L2 header size in two byte words */
inner_l2_hdr_size = (mbuf->l2_len -
MPLSINUDP_HDR_SIZE) / 2;
- /* Inner L4 header offset from the beggining
+ /* Inner L4 header offset from the beginning
* of inner packet in two byte words
*/
inner_l4_hdr_offset = (mbuf->l2_len -
struct qede_tx_queue *txq;
};
-/* This structure holds the inforation of fast path queues
+/* This structure holds the information of fast path queues
* belonging to individual engines in CMT mode.
*/
struct qede_fastpath_cmt {
/*
* Limits are strict since take into account initial estimation.
- * Resource allocation stategy is described in
+ * Resource allocation strategy is described in
* sfc_estimate_resource_limits().
*/
lim.edl_min_evq_count = lim.edl_max_evq_count =
{
if (sfc_dp_find_by_name(head, entry->type, entry->name) != NULL) {
SFC_GENERIC_LOG(ERR,
- "sfc %s dapapath '%s' already registered",
+ "sfc %s datapath '%s' already registered",
entry->type == SFC_DP_RX ? "Rx" :
entry->type == SFC_DP_TX ? "Tx" :
"unknown",
struct sfc_dp_rxq **dp_rxqp);
/**
- * Free resources allocated for datapath recevie queue.
+ * Free resources allocated for datapath receive queue.
*/
typedef void (sfc_dp_rx_qdestroy_t)(struct sfc_dp_rxq *dp_rxq);
/**
* Receive queue purge function called after queue flush.
*
- * Should be used to free unused recevie buffers.
+ * Should be used to free unused receive buffers.
*/
typedef void (sfc_dp_rx_qpurge_t)(struct sfc_dp_rxq *dp_rxq);
*
* @param evq_prime Global address of the prime register
* @param evq_hw_index Event queue index
- * @param evq_read_ptr Masked event qeueu read pointer
+ * @param evq_read_ptr Masked event queue read pointer
*/
static inline void
sfc_ef100_evq_prime(volatile void *evq_prime, unsigned int evq_hw_index,
unsup_rx_prefix_fields =
efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);
- /* LENGTH and CLASS filds must always be present */
+ /* LENGTH and CLASS fields must always be present */
if ((unsup_rx_prefix_fields &
((1U << EFX_RX_PREFIX_FIELD_LENGTH) |
(1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)
rxq->block_size, rxq->buf_stride);
sfc_ef10_essb_rx_info(&rxq->dp.dpq,
"max fill level is %u descs (%u bufs), "
- "refill threashold %u descs (%u bufs)",
+ "refill threshold %u descs (%u bufs)",
rxq->max_fill_level,
rxq->max_fill_level * rxq->block_size,
rxq->refill_threshold,
rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
(1ull << ESF_DZ_RX_ECRC_ERR_LBN) |
(1ull << ESF_DZ_RX_PARSE_INCOMPLETE_LBN)))) {
- /* Zero packet type is used as a marker to dicard bad packets */
+ /* Zero packet type is used as a marker to discard bad packets */
goto done;
}
*/
/*
- * At the momemt of writing DPDK v16.07 has notion of two types of
+ * At the moment of writing DPDK v16.07 has notion of two types of
* interrupts: LSC (link status change) and RXQ (receive indication).
* It allows to register interrupt callback for entire device which is
* not intended to be used for receive indication (i.e. link status
/* Make sure that end padding does not write beyond the buffer */
if (buf_aligned < nic_align_end) {
/*
- * Estimate space which can be lost. If guarnteed buffer
+ * Estimate space which can be lost. If guaranteed buffer
* size is odd, lost space is (nic_align_end - 1). More
* accurate formula is below.
*/
/*
* Finalize only ethdev queues since other ones are finalized only
- * on device close and they may require additional deinitializaton.
+ * on device close and they may require additional deinitialization.
*/
ethdev_qid = sas->ethdev_rxq_count;
while (--ethdev_qid >= (int)nb_rx_queues) {
reconfigure = true;
- /* Do not ununitialize reserved queues */
+ /* Do not uninitialize reserved queues */
if (nb_rx_queues < sas->ethdev_rxq_count)
sfc_rx_fini_queues(sa, nb_rx_queues);
/*
* Finalize only ethdev queues since other ones are finalized only
- * on device close and they may require additional deinitializaton.
+ * on device close and they may require additional deinitialization.
*/
ethdev_qid = sas->ethdev_txq_count;
while (--ethdev_qid >= (int)nb_tx_queues) {
* Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
* respectively.
* They are located within a larger buffer at offsets *toffset* and *foffset*
- * respectivelly. Both *tmask* and *fmask* represent bitmasks for the larger
+ * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
* buffer.
* Question: are the two masks equivalent?
*
}
}
-/* Accumaulate L4 raw checksums */
+/* Accumulate L4 raw checksums */
static void
tap_tx_l4_add_rcksum(char *l4_data, unsigned int l4_len, uint16_t *l4_cksum,
uint32_t *l4_raw_cksum)
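The prototype above accumulates a raw (unfolded) L4 checksum. A generic sketch of that kind of accumulation helper, with hypothetical names and the final fold left to the caller:

#include <stddef.h>
#include <stdint.h>

static uint32_t
raw_cksum_add(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	/* Sum 16-bit words; keep the carries in the upper bits for now. */
	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)	/* odd trailing byte is padded with zero */
		sum += (uint32_t)buf[len - 1] << 8;
	return sum;	/* caller folds to 16 bits and complements at the end */
}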
* Load BPF instructions to kernel
*
* @param[in] type
- * BPF program type: classifieir or action
+ * BPF program type: classifier or action
*
* @param[in] insns
* Array of BPF instructions (equivalent to BPF instructions)
* @param[in] insns_cnt
* Number of BPF instructions (size of array)
*
- * @param[in] lincense
+ * @param[in] license
* License string that must be acknowledged by the kernel
*
* @return
}
/**
- * Helper function to send a serie of TC actions to the kernel
+ * Helper function to send a series of TC actions to the kernel
*
* @param[in] flow
* Pointer to rte flow containing the netlink message
break;
/*
- * Subtract offest to restore real key index
+ * Subtract offset to restore real key index
* If a non RSS flow is falsely trying to release map
* entry 0 - the offset subtraction will calculate the real
* map index as an out-of-range value and the release operation
entry = rte_zmalloc("nicvf", sizeof(*entry), RTE_CACHE_LINE_SIZE);
if (entry == NULL)
- rte_panic("Cannoc allocate memory for svf_entry\n");
+ rte_panic("Cannot allocate memory for svf_entry\n");
entry->vf = vf;
return -ENOMEM;
}
}
- /* confiugre msix for sleep until rx interrupt */
+ /* configure msix for sleep until rx interrupt */
txgbe_configure_msix(dev);
/* initialize transmission unit */
wr32(hw, TXGBE_IVARMISC, tmp);
} else {
/* rx or tx causes */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, TXGBE_IVAR(queue >> 1));
tmp &= ~(0xFF << idx);
/* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
- /* Stop incrementating the System Time registers. */
+ /* Stop incrementing the System Time registers. */
wr32(hw, TXGBE_TSTIMEINC, 0);
return 0;
wr32(hw, TXGBE_VFIVARMISC, tmp);
} else {
/* rx or tx cause */
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
idx = ((16 * (queue & 1)) + (8 * direction));
tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
tmp &= ~(0xFF << idx);
/* Configure all RX queues of VF */
for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
/* Force all queue use vector 0,
- * as TXGBE_VF_MAXMSIVECOTR = 1
+ * as TXGBE_VF_MAXMSIVECTOR = 1
*/
txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
rte_intr_vec_list_index_set(intr_handle, q_idx,
/* only one misc vector supported - mailbox */
eicr &= TXGBE_VFICR_MASK;
- /* Workround for ICR lost */
+ /* Workaround for ICR lost */
intr->flags |= TXGBE_FLAG_MAILBOX;
/* To avoid compiler warnings set eicr to used. */
return -1;
}
- /* Disable and clear Rx SPI and key table entryes*/
+ /* Disable and clear Rx SPI and key table entries */
reg_val = TXGBE_IPSRXIDX_WRITE |
TXGBE_IPSRXIDX_TB_SPI | (sa_index << 3);
wr32(hw, TXGBE_IPSRXSPI, 0);
wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
- /* clear VMDq map to perment rar 0 */
+ /* clear VMDq map to permanent rar 0 */
hw->mac.clear_vmdq(hw, 0, BIT_MASK32);
/* clear VMDq map to scan rar 127 */
hw->has_rx_offload = rx_offload_enabled(hw);
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
- /* Enable vector (0) for Link State Intrerrupt */
+ /* Enable vector (0) for Link State Interrupt */
if (VIRTIO_OPS(hw)->set_config_irq(hw, 0) ==
VIRTIO_MSI_NO_VECTOR) {
PMD_DRV_LOG(ERR, "failed to set config vector");
}
}
- /* Enable uio/vfio intr/eventfd mapping: althrough we already did that
+ /* Enable uio/vfio intr/eventfd mapping: although we already did that
* in device configure, but it could be unmapped when device is
* stopped.
*/
return dst;
}
-/* Enable one vector (0) for Link State Intrerrupt */
+/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
return -EINVAL;
}
- /* Update mss lengthes in mbuf */
+ /* Update mss lengths in mbuf */
m->tso_segsz = hdr->gso_size;
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
/*
* load len from desc, store into mbuf pkt_len and data_len
- * len limiated by l6bit buf_len, pkt_len[16:31] can be ignored
+ * len limited by 16-bit buf_len, pkt_len[16:31] can be ignored
*/
const __mmask16 mask = 0x6 | 0x6 << 4 | 0x6 << 8 | 0x6 << 12;
__m512i values = _mm512_maskz_shuffle_epi32(mask, v_desc, 0xAA);
/*
* Two types of mbuf to be cleaned:
* 1) mbuf that has been consumed by backend but not used by virtio.
- * 2) mbuf that hasn't been consued by backend.
+ * 2) mbuf that hasn't been consumed by backend.
*/
struct rte_mbuf *
virtqueue_detach_unused(struct virtqueue *vq)
* Control link announce acknowledgement
*
* The command VIRTIO_NET_CTRL_ANNOUNCE_ACK is used to indicate that
- * driver has recevied the notification; device would clear the
+ * driver has received the notification; device would clear the
* VIRTIO_NET_S_ANNOUNCE bit in the status field after it receives
* this command.
*/
struct vq_desc_extra vq_descx[0];
};
-/* If multiqueue is provided by host, then we suppport it. */
+/* If multiqueue is provided by host, then we support it. */
#define VIRTIO_NET_CTRL_MQ 4
#define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET 0
rte_prefetch0((void *)(size_t)(dq_storage + 1));
/* Prepare next pull descriptor. This will give space for the
- * prefething done on DQRR entries
+ * prefetching done on DQRR entries
*/
q_storage->toggle ^= 1;
dq_storage1 = q_storage->dq_storage[q_storage->toggle];
/** total number of hw queues. */
uint16_t num_hw_queues;
/**
- * Maximum number of hw queues to be alocated per core.
+ * Maximum number of hw queues to be allocated per core.
* This is limited by MAX_HW_QUEUE_PER_CORE
*/
uint16_t max_hw_queues_per_core;
struct fsl_mc_io dpdmai;
/** HW ID for DPDMAI object */
uint32_t dpdmai_id;
- /** Tocken of this device */
+ /** Token of this device */
uint16_t token;
/** Number of queue in this DPDMAI device */
uint8_t num_queues;
if (HIGH_WARN(sensor, value) ||
LOW_WARN(sensor, value)) {
- IFPGA_RAWDEV_PMD_INFO("%s reach theshold %d\n",
+ IFPGA_RAWDEV_PMD_INFO("%s reach threshold %d\n",
sensor->name, value);
*gsd_start = true;
break;
if (!strcmp(sensor->name, "12V AUX Voltage")) {
if (value < AUX_VOLTAGE_WARN) {
IFPGA_RAWDEV_PMD_INFO(
- "%s reach theshold %d mV\n",
+ "%s reach threshold %d mV\n",
sensor->name, value);
*gsd_start = true;
break;
pos = ifpga_pci_find_ext_capability(fd, RTE_PCI_EXT_CAP_ID_ERR);
if (!pos)
goto end;
- /* save previout ECAP_AER+0x08 */
+ /* save previous ECAP_AER+0x08 */
ret = pread(fd, &data, sizeof(data), pos+0x08);
if (ret == -1)
goto end;
ifpga_rdev->aer_old[0] = data;
- /* save previout ECAP_AER+0x14 */
+ /* save previous ECAP_AER+0x14 */
ret = pread(fd, &data, sizeof(data), pos+0x14);
if (ret == -1)
goto end;
ifpga_rawdev_gsd_handle, NULL);
if (ret != 0) {
IFPGA_RAWDEV_PMD_ERR(
- "Fail to create ifpga nonitor thread");
+ "Fail to create ifpga monitor thread");
return -1;
}
ifpga_monitor_start = 1;
* @spad_write: Write val to local/peer spad register.
* @db_read: Read doorbells status.
* @db_clear: Clear local doorbells.
- * @db_set_mask: Set bits in db mask, preventing db interrpts generated
+ * @db_set_mask: Set bits in db mask, preventing db interrupts generated
* for those db bits.
* @peer_db_set: Set doorbell bit to generate peer interrupt for that bit.
* @vector_bind: Bind vector source [intr] to msix vector [msix].
* The target here is to group all the physical memory regions of the
* virtio device in one indirect mkey.
* For KLM Fixed Buffer Size mode (HW find the translation entry in one
- * read according to the guest phisical address):
+ * read according to the guest physical address):
* All the sub-direct mkeys of it must be in the same size, hence, each
* one of them should be in the GCD size of all the virtio memory
* regions and the holes between them.
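The sizing rule above relies on the greatest common divisor of all region sizes and the holes between them; the GCD itself is the textbook Euclidean computation:

#include <stdint.h>

static uint64_t
gcd_u64(uint64_t a, uint64_t b)
{
	while (b != 0) {
		uint64_t t = a % b;

		a = b;
		b = t;
	}
	return a;
}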
if (priv->features & (1ULL << VIRTIO_F_RING_PACKED)) {
if (!(priv->caps.virtio_queue_type & (1 <<
MLX5_VIRTQ_TYPE_PACKED))) {
- DRV_LOG(ERR, "Failed to configur PACKED mode for vdev "
+ DRV_LOG(ERR, "Failed to configure PACKED mode for vdev "
"%d - it was not reported by HW/driver"
" capability.", priv->vid);
return -ENOTSUP;
/* Encoder output to Decoder input adapter. The Decoder accepts only soft input
* so each bit of the encoder output must be translated into one byte of LLR. If
* Sub-block Deinterleaver is bypassed, which is the case, the padding bytes
- * must additionally be insterted at the end of each sub-block.
+ * must additionally be inserted at the end of each sub-block.
*/
static inline void
transform_enc_out_dec_in(struct rte_mbuf **mbufs, uint8_t *temp_buf,
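As the comment above notes, every encoder output bit becomes one byte of soft (LLR) input for the decoder. A generic sketch of such an expansion; the saturation values and the sign convention (bit 1 mapped to a negative LLR) are assumptions, not bbdev's definition:

#include <stdint.h>

static void
bits_to_llr(const uint8_t *in, int8_t *llr, uint32_t nb_bits)
{
	uint32_t i;

	for (i = 0; i < nb_bits; i++) {
		uint8_t bit = (in[i / 8] >> (7 - (i % 8))) & 1;

		llr[i] = bit ? -127 : 127; /* assumed sign convention */
	}
}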
0 /*SOCKET_ID_ANY*/);
if (retval < 0)
rte_exit(EXIT_FAILURE,
- "Faled to create bond port\n");
+ "Failed to create bond port\n");
BOND_PORT = retval;
struct rte_ether_hdr *);
ether_type = eth_hdr->ether_type;
if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN))
- printf("VLAN taged frame, offset:");
+ printf("VLAN tagged frame, offset:");
offset = get_vlan_offset(eth_hdr, ðer_type);
if (offset > 0)
printf("%d\n", offset);
/* MAC updating enabled by default. */
static int mac_updating = 1;
-/* hardare copy mode enabled by default. */
+/* hardware copy mode enabled by default. */
static copy_mode_t copy_mode = COPY_MODE_DMA_NUM;
/* size of descriptor ring for hardware copy mode or
#endif
}
- /* Enable Rx vlan filter, VF unspport status is discard */
+	/* Enable Rx vlan filter, VF unsupported status is discarded */
ret = rte_eth_dev_set_vlan_offload(port_id, RTE_ETH_VLAN_FILTER_MASK);
if (ret != 0)
return ret;
/**
* Retrieve the Ethernet device pause frame configuration according to
- * parameter attributes desribed by ethtool data structure,
+ * parameter attributes described by ethtool data structure,
* ethtool_pauseparam.
*
* @param port_id
/**
* Setting the Ethernet device pause frame configuration according to
- * parameter attributes desribed by ethtool data structure, ethtool_pauseparam.
+ * parameter attributes described by ethtool data structure, ethtool_pauseparam.
*
* @param port_id
* The port identifier of the Ethernet device.
#endif /* RTE_LIBRTE_IP_FRAG_TBL_STAT */
/*
- * If number of queued packets reached given threahold, then
+ * If number of queued packets reached given threshold, then
* send burst of packets on an output interface.
*/
static inline uint32_t
/*
* At any given moment up to <max_flow_num * (MAX_FRAG_NUM)>
- * mbufs could be stored int the fragment table.
+ * mbufs could be stored in the fragment table.
* Plus, each TX queue can hold up to <max_flow_num> packets.
*/
- /* mbufs stored int the gragment table. 8< */
+ /* mbufs stored in the fragment table. 8< */
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ BUF_SIZE - 1) / BUF_SIZE;
"rte_pktmbuf_pool_create(%s) failed", buf);
return -1;
}
- /* >8 End of mbufs stored int the fragmentation table. */
+ /* >8 End of mbufs stored in the fragmentation table. */
return 0;
}
for (i = 0; i < nb_rx_adapter; i++) {
adapter = &(em_conf->rx_adapter[i]);
sprintf(print_buf,
- "\tRx adaper ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
+ "\tRx adapter ID: %-2d\tConnections: %-2d\tEvent dev ID: %-2d",
adapter->adapter_id,
adapter->nb_connections,
adapter->eventdev_id);
/*
* Determine whether multi-segment support is required:
* - either frame buffer size is smaller than mtu
- * - or reassmeble support is requested
+ * - or reassemble support is requested
*/
static int
multi_seg_required(void)
ret = rte_hash_add_key_data(map, &key, (void *)i);
if (ret < 0) {
- printf("Faled to insert cdev mapping for (lcore %u, "
+ printf("Failed to insert cdev mapping for (lcore %u, "
"cdev %u, qp %u), errno %d\n",
key.lcore_id, ipsec_ctx->tbl[i].id,
ipsec_ctx->tbl[i].qp, ret);
str = "Inbound";
}
- /* Required cryptodevs with operation chainning */
+ /* Required cryptodevs with operation chaining */
if (!(dev_info->feature_flags &
RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
return ret;
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- /* limit allowed HW offloafs, as user requested */
+ /* limit allowed HW offloads, as user requested */
dev_info.rx_offload_capa &= dev_rx_offload;
dev_info.tx_offload_capa &= dev_tx_offload;
local_port_conf.rxmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required RX offloads: 0x%" PRIx64
- ", avaialbe RX offloads: 0x%" PRIx64 "\n",
+ ", available RX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
dev_info.rx_offload_capa);
local_port_conf.txmode.offloads)
rte_exit(EXIT_FAILURE,
"Error: port %u required TX offloads: 0x%" PRIx64
- ", avaialbe TX offloads: 0x%" PRIx64 "\n",
+ ", available TX offloads: 0x%" PRIx64 "\n",
portid, local_port_conf.txmode.offloads,
dev_info.tx_offload_capa);
if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
local_port_conf.txmode.offloads |= RTE_ETH_TX_OFFLOAD_IPV4_CKSUM;
- printf("port %u configurng rx_offloads=0x%" PRIx64
+ printf("port %u configuring rx_offloads=0x%" PRIx64
", tx_offloads=0x%" PRIx64 "\n",
portid, local_port_conf.rxmode.offloads,
local_port_conf.txmode.offloads);
continue;
}
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
if (rc4 >= 0) {
if (rc6 >= 0) {
RTE_LOG(ERR, IPSEC,
- "%s: SPI %u used simultaeously by "
+ "%s: SPI %u used simultaneously by "
"IPv4(%d) and IPv6 (%d) SP rules\n",
__func__, spi, rc4, rc6);
return -EINVAL;
}
/*
- * Allocate space and init rte_ipsec_sa strcutures,
+ * Allocate space and init rte_ipsec_sa structures,
* one per session.
*/
static int
continue;
}
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
continue;
}
- /* unrecognizeable input */
+ /* unrecognizable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
return;
st=$?
REMOTE_MAC=`echo ${REMOTE_MAC} | sed -e 's/^.*ether //' -e 's/ brd.*$//'`
if [[ $st -ne 0 || -z "${REMOTE_MAC}" ]]; then
- echo "coouldn't retrieve ether addr from ${REMOTE_IFACE}"
+ echo "couldn't retrieve ether addr from ${REMOTE_IFACE}"
exit 127
fi
# by default ipsec-secgw can't deal with multi-segment packets
# make sure our local/remote host wouldn't generate fragmented packets
-# if reassmebly option is not enabled
+# if reassembly option is not enabled
DEF_MTU_LEN=1400
DEF_PING_LEN=1200
pthread_t kni_link_tid;
int pid;
- /* Associate signal_hanlder function with USR signals */
+ /* Associate signal_handler function with USR signals */
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
signal(SIGRTMIN, signal_handler);
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- /* >8 End of initializion the Environment Abstraction Layer (EAL). */
+ /* >8 End of initialization of the Environment Abstraction Layer (EAL). */
argc -= ret;
argv += ret;
ethdev_count++;
}
- /* Event device configurtion */
+ /* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
/* Enable implicit release */
ethdev_count++;
}
- /* Event device configurtion */
+ /* Event device configuration */
rte_event_dev_info_get(event_d_id, &dev_info);
/* Enable implicit release */
qconf->next_flush_time[portid] = rte_get_timer_cycles() + drain_tsc;
}
- /* Pass target to indicate that this job is happy of time interwal
+ /* Pass target to indicate that this job is happy with the time interval
* in which it was called. */
rte_jobstats_finish(&qconf->flush_job, qconf->flush_job.target);
}
}
/*
- * Parses IPV6 address, exepcts the following format:
- * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X - is a hexedecimal digit).
+ * Parse IPv6 address, expects the following format:
+ * XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX:XXXX (where X is a hexadecimal digit).
*/
static int
parse_ipv6_addr(const char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
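For the fully expanded form described above (eight 16-bit hexadecimal groups separated by colons), a minimal standalone parser could look as follows; this is only an illustration, not the sample application's parser, and it does not handle '::' compression:

#include <stdint.h>
#include <stdio.h>

static int
parse_full_ipv6(const char *in, uint16_t w[8])
{
	unsigned int v[8];
	int i;

	if (sscanf(in, "%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x",
		   &v[0], &v[1], &v[2], &v[3],
		   &v[4], &v[5], &v[6], &v[7]) != 8)
		return -1;
	for (i = 0; i < 8; i++)
		w[i] = (uint16_t)v[i];
	return 0;
}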
}
/*
- * build-up default vaues for dest MACs.
+ * build-up default values for dest MACs.
*/
static void
set_default_dest_mac(void)
}
-/* Freqency scale down timer callback */
+/* Frequency scale down timer callback */
static void
power_timer_cb(__rte_unused struct rte_timer *tim,
__rte_unused void *arg)
ret = rte_metrics_update_values(RTE_METRICS_GLOBAL, telstats_index,
values, RTE_DIM(values));
if (ret < 0)
- RTE_LOG(WARNING, POWER, "failed to update metrcis\n");
+ RTE_LOG(WARNING, POWER, "failed to update metrics\n");
}
static int
#endif /* DO_RFC_1812_CHECKS */
/*
- * We group consecutive packets with the same destionation port into one burst.
+ * We group consecutive packets with the same destination port into one burst.
* To avoid extra latency this is done together with some other packet
* processing, but after we made a final decision about packet's destination.
* To do this we maintain:
static const struct {
uint64_t pnum; /* prebuild 4 values for pnum[]. */
- int32_t idx; /* index for new last updated elemnet. */
+ int32_t idx; /* index for new last updated element. */
uint16_t lpv; /* add value to the last updated element. */
} gptbl[GRPSZ] = {
{
/*
* Group consecutive packets with the same destination port in bursts of 4.
- * Suppose we have array of destionation ports:
+ * Suppose we have array of destination ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
* We doing 4 comparisons at once and the result is 4 bit mask.
/*
* Group consecutive packets with the same destination port in bursts of 4.
- * Suppose we have array of destionation ports:
+ * Suppose we have array of destination ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
* We doing 4 comparisons at once and the result is 4 bit mask.
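A scalar reference of the burst-grouping idea documented above, with illustrative names only: walk dst_port[] and emit one (port, count) group per run of equal destinations. The SIMD versions compute the same grouping four comparisons at a time.

#include <stdint.h>

static uint32_t
group_ports_scalar(const uint16_t *dst_port, uint32_t nb_pkts,
		   uint16_t grp_port[], uint32_t grp_cnt[])
{
	uint32_t i, nb_grp = 0;

	for (i = 0; i < nb_pkts; i++) {
		if (nb_grp == 0 || dst_port[i] != grp_port[nb_grp - 1]) {
			/* start a new group for a different destination */
			grp_port[nb_grp] = dst_port[i];
			grp_cnt[nb_grp] = 1;
			nb_grp++;
		} else {
			/* same destination as the previous packet */
			grp_cnt[nb_grp - 1]++;
		}
	}
	return nb_grp;
}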
cmdline_printf(cl, "detached device %s\n",
da.name);
else
- cmdline_printf(cl, "failed to dettach device %s\n",
+ cmdline_printf(cl, "failed to detach device %s\n",
da.name);
rte_devargs_reset(&da);
}
/*
* This sample application is a simple multi-process application which
- * demostrates sharing of queues and memory pools between processes, and
+ * demonstrates sharing of queues and memory pools between processes, and
* using those queues/pools for communication between the processes.
*
* Application is designed to run with two processes, a primary and a
*/
/*
- * Sample application demostrating how to do packet I/O in a multi-process
+ * Sample application demonstrating how to do packet I/O in a multi-process
* environment. The same code can be run as a primary process and as a
* secondary process, just with a different proc-id parameter in each case
* (apart from the EAL flag to indicate a secondary process).
break;
}
- /* Print packet forwading config. */
+ /* Print packet forwarding config. */
RTE_LCORE_FOREACH_WORKER(lcore_id) {
conf = &fwd_lcore_conf[lcore_id];
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid packet_ordering arguments\n");
- /* Check if we have enought cores */
+ /* Check if we have enough cores */
if (rte_lcore_count() < 3)
rte_exit(EXIT_FAILURE, "Error, This application needs at "
"least 3 logical cores to run:\n"
bzero(lt, sizeof(struct lthread));
lt->root_sched = THIS_SCHED;
- /* set the function args and exit handlder */
+ /* set the function args and exit handler */
_lthread_init(lt, fun, arg, _lthread_exit_handler);
/* put it in the ready queue */
}
- /* wait until the joinging thread has collected the exit value */
+ /* wait until the joining thread has collected the exit value */
while (lt->join != LT_JOIN_EXIT_VAL_READ)
_reschedule();
/* invalid to join a detached thread, or a thread that is joined */
if ((lt_state & BIT(ST_LT_DETACH)) || (lt->join == LT_JOIN_THREAD_SET))
return POSIX_ERRNO(EINVAL);
- /* pointer to the joining thread and a poingter to return a value */
+ /* pointer to the joining thread and a pointer to return a value */
lt->lt_join = current;
current->lt_exit_ptr = ptr;
/* There is a race between lthread_join() and lthread_exit()
}
/*
- * Defafult diagnostic callback
+ * Default diagnostic callback
*/
static uint64_t
_lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
LT_JOIN_EXIT_VAL_READ, /* joining thread has collected ret val */
};
-/* defnition of an lthread stack object */
+/* definition of an lthread stack object */
struct lthread_stack {
uint8_t stack[LTHREAD_MAX_STACK_SIZE];
size_t stack_size;
tls->root_sched = (THIS_SCHED);
lt->tls = tls;
- /* allocate data for TLS varaiables using RTE_PER_LTHREAD macros */
+ /* allocate data for TLS variables using RTE_PER_LTHREAD macros */
if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
lt->per_lthread_data =
_lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
}
/*
- * When set to zero, simple forwaring path is eanbled.
+ * When set to zero, simple forwarding path is enabled.
* When set to one, optimized forwarding path is enabled.
* Note that LPM optimisation path uses SSE4.1 instructions.
*/
}
/*
- * We group consecutive packets with the same destionation port into one burst.
+ * We group consecutive packets with the same destination port into one burst.
* To avoid extra latency this is done together with some other packet
* processing, but after we made a final decision about packet's destination.
* To do this we maintain:
/*
* Group consecutive packets with the same destination port in bursts of 4.
- * Suppose we have array of destionation ports:
+ * Suppose we have array of destination ports:
* dst_port[] = {a, b, c, d,, e, ... }
* dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
* We doing 4 comparisons at once and the result is 4 bit mask.
{
static const struct {
uint64_t pnum; /* prebuild 4 values for pnum[]. */
- int32_t idx; /* index for new last updated elemnet. */
+ int32_t idx; /* index for new last updated element. */
uint16_t lpv; /* add value to the last updated element. */
} gptbl[GRPSZ] = {
{
/*
* Send packets out, through destination port.
- * Consecuteve pacekts with the same destination port
+ * Consecutive packets with the same destination port
* are already grouped together.
* If destination port for the packet equals BAD_PORT,
* then free the packet without sending it out.
ret = rte_timer_subsystem_init();
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Failed to initialize timer subystem\n");
+ rte_exit(EXIT_FAILURE, "Failed to initialize timer subsystem\n");
/* pre-init dst MACs for all ports to 02:00:00:00:00:xx */
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
*
* The decision whether to invoke the real library function or the lthread
* function is controlled by a per pthread flag that can be switched
- * on of off by the pthread_override_set() API described below. Typcially
+ * on or off by the pthread_override_set() API described below. Typically
* this should be done as the first action of the initial lthread.
*
* N.B In general it would be poor practice to revert to invoke a real
; This program is setting up two register arrays called "pkt_counters" and "byte_counters".
; On every input packet (Ethernet/IPv4), the "pkt_counters" register at location indexed by
; the IPv4 header "Source Address" field is incremented, while the same location in the
-; "byte_counters" array accummulates the value of the IPv4 header "Total Length" field.
+; "byte_counters" array accumulates the value of the IPv4 header "Total Length" field.
;
; The "regrd" and "regwr" CLI commands can be used to read and write the current value of
; any register array location.
" qavg port X subport Y pipe Z : Show average queue size per pipe.\n"
" qavg port X subport Y pipe Z tc A : Show average queue size per pipe and TC.\n"
" qavg port X subport Y pipe Z tc A q B : Show average queue size of a specific queue.\n"
- " qavg [n|period] X : Set number of times and peiod (us).\n\n"
+ " qavg [n|period] X : Set number of times and period (us).\n\n"
);
}
}
}
}
-/* >8 End of packets dequeueing. */
+/* >8 End of packets dequeuing. */
/*
* Application main function - loops through
int ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- /* >8 End of initializion the Environment Abstraction Layer (EAL). */
+ /* >8 End of initialization of the Environment Abstraction Layer (EAL). */
argc -= ret;
argv += ret;
static char *socket_files;
static int nb_sockets;
-/* empty vmdq configuration structure. Filled in programatically */
+/* empty VMDq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
/*
* VLAN strip is necessary for 1G NIC such as I350,
* this fixes bug of ipv4 forwarding in guest can't
- * forward pakets from one virtio dev to another virtio dev.
+ * forward packets from one virtio dev to another virtio dev.
*/
.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
},
" --nb-devices ND\n"
" -p PORTMASK: Set mask for ports to be used by application\n"
" --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
- " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
+ " --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
" --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
" --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
" --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
struct vhost_dev *vdev;
struct mbuf_table *tx_q;
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
tx_q = &lcore_tx_queue[lcore_id];
for (i = 0; i < rte_lcore_count(); i++) {
/*
* Remove a device from the specific data core linked list and from the
- * main linked list. Synchonization occurs through the use of the
+ * main linked list. Synchronization occurs through the use of the
* lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
/*
* So now that we're handling virtual and physical cores, we need to
- * differenciate between them when adding them to the branch monitor.
+ * differentiate between them when adding them to the branch monitor.
* Virtual cores need to be converted to physical cores.
*/
if (pol->pkt.core_type == RTE_POWER_CORE_TYPE_VIRTUAL) {
int power_manager_disable_turbo_core(unsigned int core_num);
/**
- * Get the current freuency of the core specified by core_num
+ * Get the current frequency of the core specified by core_num
*
* @param core_num
* The core number to get the current frequency
/* Default structure for VMDq. 8< */
-/* empty vmdq configuration structure. Filled in programatically */
+/* empty VMDq configuration structure. Filled in programmatically */
static const struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
}
/**
- * Get up to num elements from the fifo. Return the number actully read
+ * Get up to num elements from the FIFO. Return the number actually read
*/
static inline uint32_t
kni_fifo_get(struct rte_kni_fifo *fifo, void **data, uint32_t num)
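The helper above implements a bounded "get up to num elements" operation. A generic ring sketch with hypothetical fields (free-running read/write indices over a power-of-two buffer) shows the usual shape of such a function; it is not the rte_kni_fifo layout:

#include <stdint.h>

struct demo_fifo {
	uint32_t read;		/* consumer index, free-running */
	uint32_t write;		/* producer index, free-running */
	uint32_t len;		/* number of slots, power of two */
	void *buffer[];
};

static uint32_t
demo_fifo_get(struct demo_fifo *f, void **data, uint32_t num)
{
	uint32_t i, avail = f->write - f->read;

	if (num > avail)
		num = avail;
	for (i = 0; i < num; i++)
		data[i] = f->buffer[(f->read + i) & (f->len - 1)];
	f->read += num;
	return num;	/* number of elements actually read */
}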
return root;
}
- /* gather information about divirgent paths */
+ /* gather information about divergent paths */
lo_00 = 0;
hi_ff = UINT8_MAX;
for (k = n - 1; k >= 0; k--) {
dfa_ofs = vec_sub(t, r);
- /* QUAD/SINGLE caluclations. */
+ /* QUAD/SINGLE calculations. */
t = (xmm_t)vec_cmpgt((vector signed char)in, (vector signed char)tr_hi);
t = (xmm_t)vec_sel(
vec_sel(
}
/*
- * Resolve matches for multiple categories (LE 8, use 128b instuctions/regs)
+ * Resolve matches for multiple categories (LE 8, use 128b instructions/regs)
*/
static inline void
resolve_mcle8_avx512x1(uint32_t result[],
*/
/*
- * This implementation uses 512-bit registers(zmm) and instrincts.
+ * This implementation uses 512-bit registers(zmm) and intrinsics.
* So our main SIMD type is 512-bit width and each such variable can
* process sizeof(__m512i) / sizeof(uint32_t) == 16 entries in parallel.
*/
#define _F_(x) x##_avx512x16
/*
- * Same instrincts have different syntaxis (depending on the bit-width),
+ * Same intrinsics have different syntaxes (depending on the bit-width),
* so to overcome that few macros need to be defined.
*/
-/* Naming convention for generic epi(packed integers) type instrincts. */
+/* Naming convention for generic epi(packed integers) type intrinsics. */
#define _M_I_(x) _mm512_##x
-/* Naming convention for si(whole simd integer) type instrincts. */
+/* Naming convention for si(whole simd integer) type intrinsics. */
#define _M_SI_(x) _mm512_##x##_si512
-/* Naming convention for masked gather type instrincts. */
+/* Naming convention for masked gather type intrinsics. */
#define _M_MGI_(x) _mm512_##x
-/* Naming convention for gather type instrincts. */
+/* Naming convention for gather type intrinsics. */
#define _M_GI_(name, idx, base, scale) _mm512_##name(idx, base, scale)
/* num/mask of transitions per SIMD regs */
}
/*
- * Resolve matches for multiple categories (GT 8, use 512b instuctions/regs)
+ * Resolve matches for multiple categories (GT 8, use 512b instructions/regs)
*/
static inline void
resolve_mcgt8_avx512x1(uint32_t result[],
*/
/*
- * This implementation uses 256-bit registers(ymm) and instrincts.
+ * This implementation uses 256-bit registers(ymm) and intrinsics.
* So our main SIMD type is 256-bit width and each such variable can
* process sizeof(__m256i) / sizeof(uint32_t) == 8 entries in parallel.
*/
#define _F_(x) x##_avx512x8
/*
- * Same instrincts have different syntaxis (depending on the bit-width),
+ * Same intrinsics have different syntaxes (depending on the bit-width),
* so to overcome that few macros need to be defined.
*/
-/* Naming convention for generic epi(packed integers) type instrincts. */
+/* Naming convention for generic epi(packed integers) type intrinsics. */
#define _M_I_(x) _mm256_##x
-/* Naming convention for si(whole simd integer) type instrincts. */
+/* Naming convention for si(whole simd integer) type intrinsics. */
#define _M_SI_(x) _mm256_##x##_si256
-/* Naming convention for masked gather type instrincts. */
+/* Naming convention for masked gather type intrinsics. */
#define _M_MGI_(x) _mm256_m##x
-/* Naming convention for gather type instrincts. */
+/* Naming convention for gather type intrinsics. */
#define _M_GI_(name, idx, base, scale) _mm256_##name(base, idx, scale)
/* num/mask of transitions per SIMD regs */
BPF_EMIT_JMP;
break;
- /* ldxb 4 * ([14] & 0xf) is remaped into 6 insns. */
+ /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
case BPF_LDX | BPF_MSH | BPF_B:
/* tmp = A */
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
break;
- /* RET_K is remaped into 2 insns. RET_A case doesn't need an
+ /* RET_K is remapped into 2 insns. RET_A case doesn't need an
* extra mov as EBPF_REG_0 is already mapped into BPF_REG_A.
*/
case BPF_RET | BPF_A:
* @note If some fields can not be supported by the
* hardware/driver, then the driver ignores those fields.
* Please check driver-specific documentation for limitations
- * and capablites.
+ * and capabilities.
*/
__extension__
struct {
/** The operation completed successfully. */
RTE_DMA_STATUS_SUCCESSFUL,
/** The operation failed to complete due to abort by the user.
- * This is mainly used when processing dev_stop, user could modidy the
+ * This is mainly used when processing dev_stop, user could modify the
* descriptors (e.g. change one bit to tell hardware to abort this job),
* it allows outstanding requests to be complete as much as possible,
* so reduce the time to stop the device.
/**
* This call is easily portable to any architecture, however,
- * it may require a system call and inprecise for some tasks.
+ * it may require a system call and be imprecise for some tasks.
*/
static inline uint64_t
__rte_rdtsc_syscall(void)
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next)
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next)
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
uint16_t ticket, w;
/* Acquire ownership of write-phase.
- * This is same as rte_tickelock_lock().
+ * This is same as rte_ticketlock_lock().
*/
ticket = __atomic_fetch_add(&pf->wr.in, 1, __ATOMIC_RELAXED);
rte_wait_until_equal_16(&pf->wr.out, ticket, __ATOMIC_ACQUIRE);
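The write-phase acquisition above follows the classic ticket-lock pattern the comment points at. A minimal standalone sketch using the same GCC __atomic builtins, illustrative only and not the rte_ticketlock implementation:

#include <stdint.h>

struct demo_ticketlock {
	uint16_t in;	/* next ticket to hand out */
	uint16_t out;	/* ticket currently being served */
};

static void
demo_ticketlock_lock(struct demo_ticketlock *tl)
{
	uint16_t me = __atomic_fetch_add(&tl->in, 1, __ATOMIC_RELAXED);

	while (__atomic_load_n(&tl->out, __ATOMIC_ACQUIRE) != me)
		;	/* spin until our ticket is served */
}

static void
demo_ticketlock_unlock(struct demo_ticketlock *tl)
{
	__atomic_fetch_add(&tl->out, 1, __ATOMIC_RELEASE);
}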
__rte_alloc_size(2);
/**
- * Allocate zero'ed memory from the heap.
+ * Allocate zeroed memory from the heap.
*
* Equivalent to rte_malloc() except that the memory zone is
* initialised with zeros. In NUMA systems, the memory allocated resides on the
__rte_alloc_size(2);
/**
- * Allocate zero'ed memory from the heap.
+ * Allocate zeroed memory from the heap.
*
* Equivalent to rte_malloc() except that the memory zone is
* initialised with zeros.
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next) {
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
rte_spinlock_lock(&intr_lock);
- /* check if the insterrupt source for the fd is existent */
+ /* check if the interrupt source for the fd is existent */
TAILQ_FOREACH(src, &intr_sources, next)
if (rte_intr_fd_get(src->intr_handle) == rte_intr_fd_get(intr_handle))
break;
typedef int (*vfio_dma_func_t)(int);
/* Custom memory region DMA mapping function prototype.
- * Takes VFIO container fd, virtual address, phisical address, length and
+ * Takes VFIO container fd, virtual address, physical address, length and
* operation type (0 to unmap, 1 to map) as parameters.
* Returns 0 on success, -1 on error.
**/
* @param arg
* Argument to the called function.
* @return
- * 0 on success, netagive error code on failure.
+ * 0 on success, negative error code on failure.
*/
int eal_intr_thread_schedule(void (*func)(void *arg), void *arg);
* display correctly on console. The problem can be fixed in two ways:
* (1) change the character set of console to 1252 using chcp utility
* and use Lucida Console font, or (2) use _cprintf function when
- * writing to console. The _cprinf() will re-encode ANSI strings to the
+ * writing to console. The _cprintf() will re-encode ANSI strings to the
* console code page so many non-ASCII characters will display correctly.
*/
static struct dirent*
wcstr[n] = 0;
}
- /* Length of resuting multi-byte string WITH zero
+ /* Length of resulting multi-byte string WITH zero
* terminator
*/
if (pReturnValue)
#define FNM_PREFIX_DIRS 0x20
/**
- * This function is used for searhing a given string source
+ * This function is used for searching a given string source
* with the given regular expression pattern.
*
* @param pattern
* regular expression notation describing the pattern to match
*
* @param string
- * source string to searcg for the pattern
+ * source string to search for the pattern
*
* @param flag
* containing information about the pattern
* Basic idea is to use lock prefixed add with some dummy memory location
* as the destination. From their experiments 128B(2 cache lines) below
* current stack pointer looks like a good candidate.
- * So below we use that techinque for rte_smp_mb() implementation.
+ * So below we use that technique for rte_smp_mb() implementation.
*/
static __rte_always_inline void
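The technique referred to above, sketched for x86-64 with GCC/Clang inline assembly: a LOCK-prefixed add to a dummy slot just below the stack pointer acts as a full memory barrier and is typically cheaper than MFENCE.

static inline void
demo_smp_mb(void)
{
	/* LOCK-prefixed add to a dummy stack location; full barrier on x86. */
	asm volatile ("lock addl $0, -128(%%rsp);" ::: "memory");
}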
token = strtok(NULL, "\0");
if (token != NULL)
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
- " telemetry command, igrnoring");
+ " telemetry command, ignoring");
if (rte_event_eth_rx_adapter_queue_conf_get(rx_adapter_id, eth_dev_id,
rx_queue_id, &queue_conf)) {
token = strtok(NULL, "\0");
if (token != NULL)
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
- " telemetry command, igrnoring");
+ " telemetry command, ignoring");
if (rte_event_eth_rx_adapter_queue_stats_get(rx_adapter_id, eth_dev_id,
rx_queue_id, &q_stats)) {
token = strtok(NULL, "\0");
if (token != NULL)
RTE_EDEV_LOG_ERR("Extra parameters passed to eventdev"
- " telemetry command, igrnoring");
+ " telemetry command, ignoring");
if (rte_event_eth_rx_adapter_queue_stats_reset(rx_adapter_id,
eth_dev_id,
struct rte_fib {
char name[RTE_FIB_NAMESIZE];
enum rte_fib_type type; /**< Type of FIB struct */
- struct rte_rib *rib; /**< RIB helper datastruct */
+ struct rte_rib *rib; /**< RIB helper datastructure */
void *dp; /**< pointer to the dataplane struct*/
- rte_fib_lookup_fn_t lookup; /**< fib lookup function */
- rte_fib_modify_fn_t modify; /**< modify fib datastruct */
+ rte_fib_lookup_fn_t lookup; /**< FIB lookup function */
+ rte_fib_modify_fn_t modify; /**< modify FIB datastructure */
uint64_t def_nh;
};
* FIB object handle
* @return
* Pointer on the dataplane struct on success
- * NULL othervise
+ * NULL otherwise
*/
void *
rte_fib_get_dp(struct rte_fib *fib);
* FIB object handle
* @return
* Pointer on the RIB on success
- * NULL othervise
+ * NULL otherwise
*/
struct rte_rib *
rte_fib_get_rib(struct rte_fib *fib);
struct rte_fib6 {
char name[FIB6_NAMESIZE];
enum rte_fib6_type type; /**< Type of FIB struct */
- struct rte_rib6 *rib; /**< RIB helper datastruct */
+ struct rte_rib6 *rib; /**< RIB helper datastructure */
void *dp; /**< pointer to the dataplane struct*/
- rte_fib6_lookup_fn_t lookup; /**< fib lookup function */
- rte_fib6_modify_fn_t modify; /**< modify fib datastruct */
+ rte_fib6_lookup_fn_t lookup; /**< FIB lookup function */
+ rte_fib6_modify_fn_t modify; /**< modify FIB datastructure */
uint64_t def_nh;
};
* FIB6 object handle
* @return
* Pointer on the dataplane struct on success
- * NULL othervise
+ * NULL otherwise
*/
void *
rte_fib6_get_dp(struct rte_fib6 *fib);
* FIB object handle
* @return
* Pointer on the RIB6 on success
- * NULL othervise
+ * NULL otherwise
*/
struct rte_rib6 *
rte_fib6_get_rib(struct rte_fib6 *fib);
"Return list of IPsec SAs with telemetry enabled.");
rte_telemetry_register_cmd("/ipsec/sa/stats",
handle_telemetry_cmd_ipsec_sa_stats,
- "Returns IPsec SA stastistics. Parameters: int sa_spi");
+ "Returns IPsec SA statistics. Parameters: int sa_spi");
rte_telemetry_register_cmd("/ipsec/sa/details",
handle_telemetry_cmd_ipsec_sa_details,
"Returns IPsec SA configuration. Parameters: int sa_spi");
* @param keys
* Array of keys to be looked up in the SAD
* @param sa
- * Pointer assocoated with the keys.
+ * Pointer associated with the keys.
* If the lookup for the given key failed, then corresponding sa
* will be NULL
* @param n
memcpy(sa->hdr, prm->tun.hdr, prm->tun.hdr_len);
- /* insert UDP header if UDP encapsulation is inabled */
+ /* insert UDP header if UDP encapsulation is enabled */
if (sa->type & RTE_IPSEC_SATP_NATT_ENABLE) {
struct rte_udp_hdr *udph = (struct rte_udp_hdr *)
&sa->hdr[prm->tun.hdr_len];
/**
* @file
- * This file contains definion of RTE mbuf structure itself,
+ * This file contains the definition of the RTE mbuf structure itself,
* packet offload flags and some related macros.
* For majority of DPDK entities, it is not recommended to include
* this file directly, use include <rte_mbuf.h> instead.
# process all libraries equally, as far as possible
-# "core" libs first, then others alphebetically as far as possible
+# "core" libs first, then others alphabetically as far as possible
# NOTE: for speed of meson runs, the dependencies in the subdirectories
# sometimes skip deps that would be implied by others, e.g. if mempool is
# given as a dep, no need to mention ring. This is especially true for the
/**
* L2TPv2 message Header contains all options except ns_nr(length,
* offset size, offset padding).
- * Ns and Nr MUST be toghter.
+ * Ns and Nr MUST be together.
*/
struct rte_l2tpv2_msg_without_ns_nr {
rte_be16_t length; /**< length(16) */
/**
* L2TPv2 message Header contains all options except ns_nr(length, ns, nr).
- * offset size and offset padding MUST be toghter.
+ * offset size and offset padding MUST be together.
*/
struct rte_l2tpv2_msg_without_offset {
rte_be16_t length; /**< length(16) */
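The two structs above exist because the optional L2TPv2 fields appear in pairs: when the S bit is set, both Ns and Nr are present, and when the O bit is set, both offset size and offset padding are present (RFC 2661). An illustrative sketch; the mask names below are hypothetical rather than the rte_l2tpv2.h ones, though the bit values follow RFC 2661:

    #include <stdbool.h>
    #include <stdint.h>

    #define L2TP_FLAG_S 0x0800 /* sequence bit: Ns/Nr fields present */
    #define L2TP_FLAG_O 0x0200 /* offset bit: offset size/padding present */

    /* Ns and Nr are both present or both absent. */
    static bool
    l2tpv2_has_ns_nr(uint16_t flags)
    {
            return (flags & L2TP_FLAG_S) != 0;
    }

    /* Offset size and offset padding likewise travel together. */
    static bool
    l2tpv2_has_offset(uint16_t flags)
    {
            return (flags & L2TP_FLAG_O) != 0;
    }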
uint64_t n_pkts_miss;
/** Number of packets (with either lookup hit or miss) per pipeline
- * action. Array of pipeline *n_actions* elements indedex by the
+ * action. Array of pipeline *n_actions* elements indexed by the
* pipeline-level *action_id*, therefore this array has the same size
* for all the tables within the same pipeline.
*/
uint64_t n_pkts_forget;
/** Number of packets (with either lookup hit or miss) per pipeline action. Array of
- * pipeline *n_actions* elements indedex by the pipeline-level *action_id*, therefore this
+ * pipeline *n_actions* elements indexed by the pipeline-level *action_id*, therefore this
* array has the same size for all the tables within the same pipeline.
*/
uint64_t *n_pkts_action;
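Both comments describe the same layout: n_pkts_action holds one counter per pipeline action, indexed by the pipeline-level action_id, so the array length equals the pipeline's n_actions regardless of the table. A tiny sketch of walking such an array; the struct here is illustrative, mirroring only the fields quoted above rather than the full rte_swx_ctl.h definitions:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct stats_sketch {
            uint64_t n_pkts_miss;
            uint64_t *n_pkts_action; /* n_actions elements, indexed by action_id */
    };

    static void
    dump_per_action_counters(const struct stats_sketch *s, uint32_t n_actions)
    {
            uint32_t action_id;

            for (action_id = 0; action_id < n_actions; action_id++)
                    printf("action %" PRIu32 ": %" PRIu64 " packets\n",
                           action_id, s->n_pkts_action[action_id]);
    }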
*/
INSTR_ALU_CKADD_FIELD, /* src = H */
INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
- INSTR_ALU_CKADD_STRUCT, /* src = h.hdeader, with any sizeof(header) */
+ INSTR_ALU_CKADD_STRUCT, /* src = h.header, with any sizeof(header) */
/* cksub dst src
* dst = dst '- src
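The '+ and '- notation above is one's-complement arithmetic, as used by IP/TCP checksums: addition folds the end-around carry back in, and subtraction is addition of the one's complement (cf. RFC 1624 incremental checksum updates). A minimal standalone sketch of the arithmetic, not the pipeline's actual implementation:

    #include <stdint.h>

    /* dst = dst '+ src : 16-bit one's-complement add with end-around carry */
    static uint16_t
    ck_add(uint16_t dst, uint16_t src)
    {
            uint32_t sum = (uint32_t)dst + src;

            sum = (sum & 0xffff) + (sum >> 16); /* fold the carry back in */
            return (uint16_t)sum;
    }

    /* dst = dst '- src : subtracting is adding the one's complement */
    static uint16_t
    ck_sub(uint16_t dst, uint16_t src)
    {
            return ck_add(dst, (uint16_t)~src);
    }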
return;
}
- /* Header encapsulation (optionally, with prior header decasulation). */
+ /* Header encapsulation (optionally, with prior header decapsulation). */
if ((t->n_headers_out == 2) &&
(h1->ptr + h1->n_bytes == t->ptr) &&
(h0->ptr == h0->ptr0)) {
if (err_line)
*err_line = 0;
if (err_msg)
- *err_msg = "Null pipeline arument.";
+ *err_msg = "Null pipeline argument.";
status = -EINVAL;
goto error;
}
return -1;
}
- /* TODO: must set to max once enbling Turbo? Considering add condition:
+ /* TODO: must we set to max once enabling Turbo? Consider adding the condition:
* if ((pi->turbo_available) && (pi->curr_idx <= 1))
*/
/* Max may have changed, so call to max function */
* backtracking positions remembered by any tokens inside the group.
* Example RegEx is `a(?>bc|b)c` if the given patterns are `abc` and `abcc` then
- * `a(bc|b)c` matches both where as `a(?>bc|b)c` matches only abcc because
- * atomic groups don't allow backtracing back to `b`.
+ * `a(bc|b)c` matches both whereas `a(?>bc|b)c` matches only `abcc` because
+ * atomic groups don't allow backtracking back to `b`.
*
* @see struct rte_regexdev_info::regexdev_capa
*/
#define RTE_REGEXDEV_SUPP_PCRE_BACKTRACKING_CTRL_F (1ULL << 3)
/**< RegEx device support PCRE backtracking control verbs.
- * Some examples of backtracing verbs are (*COMMIT), (*ACCEPT), (*FAIL),
+ * Some examples of backtracking verbs are (*COMMIT), (*ACCEPT), (*FAIL),
* (*SKIP), (*PRUNE).
*
* @see struct rte_regexdev_info::regexdev_capa
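An application should test this capability bit before compiling rules that rely on backtracking control verbs. A hedged sketch, assuming rte_regexdev_info_get() and the regexdev_capa field as declared in rte_regexdev.h:

    #include <rte_regexdev.h>

    static int
    regexdev_supports_btc_verbs(uint8_t dev_id)
    {
            struct rte_regexdev_info info;

            if (rte_regexdev_info_get(dev_id, &info) != 0)
                    return 0;
            return (info.regexdev_capa &
                    RTE_REGEXDEV_SUPP_PCRE_BACKTRACKING_CTRL_F) != 0;
    }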
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * Compile local rule set and burn the complied result to the
- * RegEx deive.
+ * Compile local rule set and burn the compiled result to the
+ * RegEx device.
*
* @param dev_id
* RegEx device identifier.
/**
* @file
- * This file contains definion of RTE ring structure itself,
+ * This file contains the definition of the RTE ring structure itself,
* init flags and some related macros.
* For majority of DPDK entities, it is not recommended to include
* this file directly, use include <rte_ring.h> or <rte_ring_elem.h>
}
/**
- * @brief Decides if new packet should be enqeued or dropped for non-empty queue
+ * @brief Decides if new packet should be enqueued or dropped for non-empty queue
*
* @param pie_cfg [in] config pointer to a PIE configuration parameter structure
* @param pie [in,out] data pointer to PIE runtime data
}
/**
- * @brief Decides if new packet should be enqeued or dropped
+ * @brief Decides if new packet should be enqueued or dropped
* Updates run time data and gives verdict whether to enqueue or drop the packet.
*
* @param pie_cfg [in] config pointer to a PIE configuration parameter structure
*
* @return Operation status
* @retval 0 enqueue the packet
- * @retval 1 drop the packet based on drop probility criteria
+ * @retval 1 drop the packet based on drop probability criteria
*/
static inline int
__rte_experimental
}
/**
- * @brief Decides if new packet should be enqeued or dropped in queue non-empty case
+ * @brief Decides if new packet should be enqueued or dropped in queue non-empty case
*
* @param red_cfg [in] config pointer to a RED configuration parameter structure
* @param red [in,out] data pointer to RED runtime data
}
/**
- * @brief Decides if new packet should be enqeued or dropped
+ * @brief Decides if new packet should be enqueued or dropped
* Updates run time data based on new queue size value.
* Based on new queue average and RED configuration parameters
* gives verdict whether to enqueue or drop the packet.
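The PIE and RED blocks above share the same caller-facing contract: the decision helper returns 0 when the packet should be enqueued and a non-zero verdict when it should be dropped. A minimal caller-side sketch under that assumption; aqm_decide() is only a stand-in for the PIE/RED helpers documented above, whose full prototypes are not shown in these hunks:

    #include <stdbool.h>
    #include <stdint.h>

    /* Stand-in for the documented decision helpers: 0 = enqueue, else drop. */
    extern int aqm_decide(void *cfg, void *runtime_data, uint32_t qlen, uint64_t now);

    static bool
    try_enqueue(void *cfg, void *runtime_data, uint32_t qlen, uint64_t now)
    {
            if (aqm_decide(cfg, runtime_data, qlen, now) != 0)
                    return false; /* drop, e.g. per the drop probability criteria */
            return true;          /* 0 means enqueue the packet */
    }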
int socket;
/* Timing */
- uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cyles */
+ uint64_t time_cpu_cycles; /* Current CPU time measured in CPU cycles */
uint64_t time_cpu_bytes; /* Current CPU time measured in bytes */
uint64_t time; /* Current NIC TX time measured in bytes */
struct rte_reciprocal inv_cycles_per_byte; /* CPU cycles per byte */
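These fields work because the scheduler tracks time in "bytes of line rate": elapsed TSC cycles are converted to bytes with the cached cycles-per-byte factor, so CPU time and NIC TX time become directly comparable. A simplified sketch using a plain division (the real code keeps the factor as an rte_reciprocal to avoid a hardware divide); the 10 Gbps figures are only an illustration:

    #include <stdint.h>

    /* cycles_per_byte = cpu_hz / line_rate_bytes_per_sec, e.g. a 2.0 GHz CPU
     * feeding a 10 Gbps (1.25e9 B/s) port gives 1.6 cycles per byte.
     */
    static uint64_t
    cycles_to_bytes(uint64_t cycles, double cycles_per_byte)
    {
            return (uint64_t)(cycles / cycles_per_byte);
    }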
*
* Hierarchical scheduler subport bandwidth profile add
* Note that this function is safe to use in runtime for adding new
- * subport bandwidth profile as it doesn't have any impact on hiearchical
+ * subport bandwidth profile as it doesn't have any impact on hierarchical
* structure of the scheduler.
* @param port
* Handle to port scheduler instance
* operations into the same table.
*
* The typical reason an implementation may choose to split the table lookup
- * operation into multiple steps is to hide the latency of the inherrent memory
+ * operation into multiple steps is to hide the latency of the inherent memory
* read operations: before a read operation with the source data likely not in
* the CPU cache, the source data prefetch is issued and the table lookup
* operation is postponed in favor of some other unrelated work, which the CPU
* mechanism allows for multiple concurrent select operations into the same table.
*
* The typical reason an implementation may choose to split the operation into multiple steps is to
- * hide the latency of the inherrent memory read operations: before a read operation with the
+ * hide the latency of the inherent memory read operations: before a read operation with the
* source data likely not in the CPU cache, the source data prefetch is issued and the operation is
* postponed in favor of some other unrelated work, which the CPU executes in parallel with the
* source data being fetched into the CPU cache; later on, the operation is resumed, this time with
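Both passages describe the same software-pipelining idea: start the memory access early with a prefetch, do unrelated work while the cache line is in flight, then resume the lookup. A generic hedged sketch (the actual multi-step lookup API is not shown in these hunks) using rte_prefetch0() from rte_prefetch.h:

    #include <stdint.h>
    #include <rte_prefetch.h>

    struct bucket {
            uint64_t key;
            uint64_t value;
    };

    /* Step 1: issue the prefetch for the bucket the key hashes to. */
    static inline void
    lookup_stage_prefetch(const struct bucket *b)
    {
            rte_prefetch0(b); /* starts the memory read without stalling */
    }

    /* Step 2: resume later, after unrelated work, when the line is likely
     * already in cache, so the reads below do not stall the CPU.
     */
    static inline int
    lookup_stage_resume(const struct bucket *b, uint64_t key, uint64_t *value)
    {
            if (b->key != key)
                    return 0; /* miss */
            *value = b->value;
            return 1; /* hit */
    }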
}
rc = pthread_create(&t_old, NULL, socket_listener, &v1_socket);
if (rc != 0) {
- TMTY_LOG(ERR, "Error with create legcay socket thread: %s\n",
+ TMTY_LOG(ERR, "Error creating legacy socket thread: %s\n",
strerror(rc));
close(v1_socket.sock);
v1_socket.sock = -1;
/**
* @internal
* Copies a value into a buffer if the buffer has enough available space.
- * Nothing written to buffer if an overflow ocurs.
+ * Nothing is written to the buffer if an overflow occurs.
* This function is not for use for values larger than given buffer length.
*/
__rte_format_printf(3, 4)
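A hedged sketch of the behaviour documented above: format the value into the remaining space and write nothing at all if it would overflow. This is an illustrative helper, not the telemetry library's internal one:

    #include <stdarg.h>
    #include <stdio.h>
    #include <string.h>

    /* Append a formatted value to buf only if it fits entirely (including the
     * terminating NUL); on overflow the buffer is left untouched.
     */
    static int
    buf_append_if_fits(char *buf, size_t buf_len, const char *fmt, ...)
    {
            char tmp[128];
            size_t used = strlen(buf);
            va_list ap;
            int n;

            va_start(ap, fmt);
            n = vsnprintf(tmp, sizeof(tmp), fmt, ap);
            va_end(ap);

            if (n < 0 || (size_t)n >= sizeof(tmp) || used + (size_t)n >= buf_len)
                    return 0; /* would overflow: write nothing */
            memcpy(buf + used, tmp, (size_t)n + 1);
            return n;
    }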
struct uffdio_register reg_struct;
/*
- * Let's register all the mmap'ed area to ensure
+ * Let's register all the mmapped area to ensure
* alignment on page boundary.
*/
reg_struct.range.start = (uint64_t)(uintptr_t)reg->mmap_addr;
msg->fd_num = 0;
send_vhost_reply(main_fd, msg);
- /* Wait for qemu to acknolwedge it's got the addresses
+ /* Wait for qemu to acknowledge it got the addresses;
* we've got to wait before we're allowed to generate faults.
*/
if (read_vhost_message(main_fd, &ack_msg) <= 0) {
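For reference, a standalone sketch of the UFFDIO_REGISTER step performed above, written against the plain Linux userfaultfd API; the vhost-specific bookkeeping and the reply/ack exchange with QEMU are omitted:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/userfaultfd.h>

    /* Register a page-aligned mmapped region with an already-open userfaultfd
     * so that missing-page faults in it are reported to user space.
     */
    static int
    uffd_register_region(int uffd, void *addr, uint64_t len)
    {
            struct uffdio_register reg;

            memset(&reg, 0, sizeof(reg));
            reg.range.start = (uint64_t)(uintptr_t)addr;
            reg.range.len = len;
            reg.mode = UFFDIO_REGISTER_MODE_MISSING;

            if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1) {
                    printf("UFFDIO_REGISTER failed: %s\n", strerror(errno));
                    return -1;
            }
            return 0;
    }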