Fix spelling errors in the doxygen docs.
Signed-off-by: John McNamara <john.mcnamara@intel.com>
* @brief
* This file documents the external interfaces that the QAT FW running
* on the QAT Acceleration Engine provides to clients wanting to
- * accelerate crypto assymetric applications
+ * accelerate crypto asymmetric applications
*/
#ifndef _ICP_QAT_FW_MMP_IDS_
* @brief
* This file documents the external interfaces that the QAT FW running
* on the QAT Acceleration Engine provides to clients wanting to
- * accelerate crypto assymetric applications
+ * accelerate crypto asymmetric applications
*/
#ifndef _ICP_QAT_FW_PKE_H_
* packets.
*
* A opdl_ring can be used as the basis for pipeline based applications. Instead
- * of each stage in a pipeline dequeueing from a ring, processing and enqueueing
+ * of each stage in a pipeline dequeuing from a ring, processing and enqueuing
* to another ring, it can process entries in-place on the ring. If stages do
* not depend on each other, they can run in parallel.
*
* Enabling this may have a negative impact on performance if only one thread
* will be processing this stage.
* @param is_input
- * Indication to nitialise the stage with all slots available or none
+ * Indication to initialise the stage with all slots available or none
*
* @return
* A pointer to the new stage, or NULL on error.
* Compare the event descriptor with original version in the ring.
* if key field event descriptor is changed by application, then
* update the slot in the ring otherwise do nothing with it.
- * the key field is flow_id, prioirty, mbuf, impl_opaque
+ * the key fields are flow_id, priority, mbuf, impl_opaque
*
* @param s
* The opdl_stage.
* @param atomic
* queue type associate with the stage.
* @return
- * if the evevnt key field is changed compare with previous record.
+ * if the event key field is changed compared with the previous record.
*/
bool
struct rte_pmd_i40e_pkt_template_input {
/** the pctype used for raw packet template */
uint16_t pctype;
- /** the buffer conatining raw packet template */
+ /** the buffer containing raw packet template */
void *packet;
/** the length of buffer with raw packet template */
uint32_t length;
* @param conf
* Specifies configuration parameters of raw packet template filter.
* @param add
- * Speicifes an action to be taken - add or remove raw packet template filter.
+ * Specifies an action to be taken - add or remove raw packet template filter.
* @return
* - (0) if successful.
* - (-ENODEV) if *port* invalid.
return -EIO;
/*
- * Allocating rte mbuffs for configured rx queues.
+ * Allocating rte mbufs for configured rx queues.
* This requires queues being enabled before
*/
if (nfp_net_rx_freelist_setup(dev) < 0) {
if (rxq == NULL)
return -ENOMEM;
- /* Hw queues mapping based on firmware confifguration */
+ /* Hw queues mapping based on firmware configuration */
rxq->qidx = queue_idx;
rxq->fl_qcidx = queue_idx * hw->stride_rx;
rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
socket_id);
if (tz == NULL) {
- PMD_DRV_LOG(ERR, "Error allocatig rx dma");
+ PMD_DRV_LOG(ERR, "Error allocating rx dma");
nfp_net_rx_queue_release(rxq);
return -ENOMEM;
}
/*
* RX path design:
*
- * There are some decissions to take:
+ * There are some decisions to take:
* 1) How to check DD RX descriptors bit
* 2) How and when to allocate new mbufs
*
rte_rmb();
/*
- * We got a packet. Let's alloc a new mbuff for refilling the
+ * We got a packet. Let's alloc a new mbuf for refilling the
* free descriptor ring as soon as possible
*/
new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
nb_hold++;
/*
- * Grab the mbuff and refill the descriptor with the
- * previously allocated mbuff
+ * Grab the mbuf and refill the descriptor with the
+ * previously allocated mbuf
*/
mb = rxb->mbuf;
rxb->mbuf = new_mb;
return -EINVAL;
}
- /* Filling the received mbuff with packet info */
+ /* Filling the received mbuf with packet info */
if (hw->rx_offset)
mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
else
mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
- /* Adding the mbuff to the mbuff array passed by the app */
+ /* Adding the mbuf to the mbuf array passed by the app */
rx_pkts[avail++] = mb;
/* Now resetting and updating the descriptor */
*
* @file dpdk/pmd/nfp_net_pmd.h
*
- * Netronome NFP_NET PDM driver
+ * Netronome NFP_NET PMD driver
*/
#ifndef _NFP_NET_PMD_H_
uint32_t tx_free_thresh;
/*
- * For each descriptor keep a reference to the mbuff and
+ * For each descriptor keep a reference to the mbuf and
* DMA address used until completion is signalled.
*/
struct {
};
/**
- * If user has configued a Virtual Queue mode, but for some particular VQ
+ * If user has configured a Virtual Queue mode, but for some particular VQ
* user needs an exclusive H/W queue associated (for better performance
* on that particular VQ), then user can pass this flag while creating the
* Virtual Queue. A H/W queue will be allocated corresponding to
* VQ's at runtime.
*
* @param vq_id
- * Virtual Queue ID which needs to be deinialized.
+ * Virtual Queue ID which needs to be deinitialized.
*
* @returns
* - 0: Success.
* VQ's at runtime.
*
* @param vq_id
- * RBP based Virtual Queue ID which needs to be deinialized.
+ * RBP based Virtual Queue ID which needs to be deinitialized.
*
* @returns
* - 0: Success.
* Creates an lthread and places it in the ready queue on a particular
* lcore.
*
- * If no scheduler exists yet on the curret lcore then one is created.
+ * If no scheduler exists yet on the current lcore then one is created.
*
* @param new_lt
* Pointer to an lthread pointer that will be initialized
* @param lcore
- * the lcore the thread should be started on or the current clore
+ * the lcore the thread should be started on or the current lcore
* -1 the current lcore
* 0 - LTHREAD_MAX_LCORES any other lcore
* @param lthread_func
/**
* Set lthread TLS
*
- * This function is modelled on pthread_set_sepcific()
+ * This function is modelled on pthread_set_specific()
* It associates a thread-specific value with a key obtained via a previous
* call to lthread_key_create().
* Different threads may bind different values to the same key. These values
* Signal a condition variable
*
* The function unblocks one thread waiting for the condition variable cond.
- * If no threads are waiting on cond, the rte_lthead_cond_signal() function
+ * If no threads are waiting on cond, the rte_lthread_cond_signal() function
* has no effect.
*
* @param cond
* Broadcast a condition variable
*
* The function unblocks all threads waiting for the condition variable cond.
- * If no threads are waiting on cond, the rte_lthead_cond_broadcast()
+ * If no threads are waiting on cond, the rte_lthread_cond_broadcast()
* function has no effect.
*
* @param cond
/*
- * Takes 2 SIMD registers containing N transitions eachi (tr0, tr1).
+ * Takes 2 SIMD registers containing N transitions each (tr0, tr1).
* Shuffles it into different representation:
* lo - contains low 32 bits of given N transitions.
* hi - contains high 32 bits of given N transitions.
\
dfa_ofs = _##P##_sub_epi32(t, r); \
\
- /* QUAD/SINGLE caluclations. */ \
+ /* QUAD/SINGLE calculations. */ \
t = _##P##_cmpgt_epi8(in, tr_hi); \
t = _##P##_sign_epi8(t, t); \
t = _##P##_maddubs_epi16(t, t); \
#define RTE_BBDEV_MAX_DEVS 128 /**< Max number of devices */
#endif
-/** Flags indiciate current state of BBDEV device */
+/** Flags indicate current state of BBDEV device */
enum rte_bbdev_state {
RTE_BBDEV_UNUSED,
RTE_BBDEV_INITIALIZED
/**
* Start a device.
- * This is the last step needed before enqueueing operations is possible.
+ * This is the last step needed before enqueuing operations is possible.
*
* @param dev_id
* The identifier of the device.
* Create a new eBPF execution context and load given BPF code into it.
*
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @return
* BPF handle that is used in future BPF operations,
* or NULL on error, with error code set in rte_errno.
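The create/exec/destroy lifecycle implied by the comment above can be sketched as follows; this is only an illustrative fragment assuming the rte_bpf_load()/rte_bpf_exec()/rte_bpf_destroy() calls from rte_bpf.h, with the rte_bpf_prm setup (instructions, program type, etc.) left to the application.

#include <rte_bpf.h>

/* Sketch only: load a BPF program, run it once against a caller
 * supplied context and release it again. */
static void
bpf_run_once(const struct rte_bpf_prm *prm, void *ctx)
{
	struct rte_bpf *bpf = rte_bpf_load(prm);

	if (bpf == NULL)
		return; /* rte_errno holds the failure reason */
	(void)rte_bpf_exec(bpf, ctx);
	rte_bpf_destroy(bpf);
}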
* file into it.
*
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @param fname
* Pathname for a ELF file.
* @param sname
uint32_t num);
/**
- * Provide information about natively compield code for given BPF handle.
+ * Provide information about natively compiled code for given BPF handle.
*
* @param bpf
* handle for the BPF code.
* @param sname
* Name of the executable section within the file to load.
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @param flags
* Flags that define expected behavior of the loaded filter
* (i.e. jited/non-jited version to use).
* @param sname
* Name of the executable section within the file to load.
* @param prm
- * Parameters used to create and initialise the BPF exeution context.
+ * Parameters used to create and initialise the BPF execution context.
* @param flags
- * Flags that define expected expected behavior of the loaded filter
+ * Flags that define expected behavior of the loaded filter
* (i.e. jited/non-jited version to use).
/**< RSA no padding scheme */
RTE_CRYPTO_RSA_PKCS1_V1_5_BT0,
/**< RSA PKCS#1 V1.5 Block Type 0 padding scheme
- * as descibed in rfc2313
+ * as described in rfc2313
*/
RTE_CRYPTO_RSA_PKCS1_V1_5_BT1,
/**< RSA PKCS#1 V1.5 Block Type 01 padding scheme
- * as descibed in rfc2313
+ * as described in rfc2313
*/
RTE_CRYPTO_RSA_PKCS1_V1_5_BT2,
/**< RSA PKCS#1 V1.5 Block Type 02 padding scheme
- * as descibed in rfc2313
+ * as described in rfc2313
*/
RTE_CRYPTO_RSA_PADDING_OAEP,
/**< RSA PKCS#1 OAEP padding scheme */
/**
* Asymmetric Modular exponentiation transform data
*
- * Structure describing modular exponentation xform param
+ * Structure describing modular exponentiation xform param
*
*/
struct rte_crypto_modex_xform {
rte_crypto_param p;
/**< p : Prime modulus data
- * DH prime modulous data in octet-string network byte order format.
+ * DH prime modulus data in octet-string network byte order format.
*
*/
* data in octet-string network byte order format.
*
* This field shall be big enough to hold the result of Modular
- * Exponentiation or Modular Multplicative Inverse
+ * Exponentiation or Modular Multiplicative Inverse
* (bigger or equal to length of modulus)
*/
};
* the user data size.
*
* @return
- * Size of the symmetric eader session.
+ * Size of the symmetric header session.
*/
unsigned int
rte_cryptodev_sym_get_header_session_size(void);
/**
* Maximum number of workers allowed.
- * Be aware of increasing the limit, becaus it is limited by how we track
+ * Be aware of increasing the limit, because it is limited by how we track
* in-flight tags. See in_flight_bitmask and rte_distributor_process
*/
#define RTE_DISTRIB_MAX_WORKERS 64
*
* @param userfunc
* User function which replaces rte_delay_us. rte_delay_us_block restores
- * buildin block delay function.
+ * builtin block delay function.
*/
void rte_delay_us_callback_register(void(*userfunc)(unsigned int));
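A minimal sketch of how this registration hook might be used, assuming the rte_delay_us_sleep() and rte_delay_us_block() helpers from rte_cycles.h:

#include <rte_cycles.h>

/* Sketch only: swap the busy-wait delay for a sleeping one, then
 * restore the built-in blocking implementation. */
static void
my_delay_us(unsigned int us)
{
	rte_delay_us_sleep(us);
}

static void
install_delay_cb(void)
{
	rte_delay_us_callback_register(my_delay_us);
	/* ... */
	rte_delay_us_callback_register(rte_delay_us_block);
}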
*
* A device class defines the type of function a device
* will be used for e.g.: Ethernet adapter (eth),
- * cryptographic coprocessor (crypto), etc.
+ * cryptographic co-processor (crypto), etc.
*/
#ifdef __cplusplus
* Aligns input parameter to the next power of 2
*
* @param x
- * The integer value to algin
+ * The integer value to align
*
* @return
* Input parameter aligned to the next power of 2
* Aligns input parameter to the previous power of 2
*
* @param x
- * The integer value to algin
+ * The integer value to align
*
* @return
* Input parameter aligned to the previous power of 2
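As a quick illustration of the two alignment helpers documented above (assuming the 32-bit rte_align32pow2()/rte_align32prevpow2() variants from rte_common.h):

#include <rte_common.h>

/* Sketch only: 33 rounds up to 64 and down to 32. */
static void
align_example(void)
{
	uint32_t next = rte_align32pow2(33);     /* 64 */
	uint32_t prev = rte_align32prevpow2(33); /* 32 */

	RTE_SET_USED(next);
	RTE_SET_USED(prev);
}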
*
* Send a message to the peer process.
*
- * This function will send a message which will be responsed by the action
+ * This function will send a message which will be responded to by the action
* identified by name in the peer process.
*
* @param msg
#define RTE_EAL_TAILQ_RWLOCK (&rte_eal_get_configuration()->mem_config->qlock)
/**
- * macro to get the multiple lock of mempool shared by mutiple-instance
+ * macro to get the multiple lock of mempool shared by multiple-instance
*/
#define RTE_EAL_MEMPOOL_RWLOCK (&rte_eal_get_configuration()->mem_config->mplock)
struct rte_log_dynamic_type *dynamic_types;
};
-/** Global log informations */
+/** Global log information */
extern struct rte_logs rte_logs;
/* SDK log type */
int32_t rte_service_lcore_list(uint32_t array[], uint32_t n);
/**
- * Get the numer of services running on the supplied lcore.
+ * Get the number of services running on the supplied lcore.
*
* @param lcore Id of the service core.
* @retval >=0 Number of services registered to this core.
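A hypothetical usage sketch combining this call with rte_service_lcore_list() shown above (array sizing and printing are illustrative only):

#include <stdio.h>
#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_service.h>

/* Sketch only: report how many services each service lcore runs. */
static void
dump_service_lcores(void)
{
	uint32_t ids[RTE_MAX_LCORE];
	int32_t i, n = rte_service_lcore_list(ids, RTE_DIM(ids));

	for (i = 0; i < n; i++)
		printf("lcore %u runs %d services\n", ids[i],
		       rte_service_lcore_count_services(ids[i]));
}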
};
/**
- * Return the first tailq entry casted to the right struct.
+ * Return the first tailq entry cast to the right struct.
*/
#define RTE_TAILQ_CAST(tailq_entry, struct_name) \
(struct struct_name *)&(tailq_entry)->tailq_head
#include <stdbool.h>
/**
- * Struct describing a Universal Unique Identifer
+ * Struct describing a Universal Unique Identifier
*/
typedef unsigned char rte_uuid_t[16];
* @param uu
* Destination UUID
* @return
- * Returns 0 on succes, and -1 if string is not a valid UUID.
+ * Returns 0 on success, and -1 if string is not a valid UUID.
*/
int rte_uuid_parse(const char *in, rte_uuid_t uu);
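A minimal, illustrative call of the parser declared above (the UUID string is just an example value):

#include <rte_uuid.h>

/* Sketch only: parse a textual UUID into its binary form. */
static int
uuid_example(void)
{
	rte_uuid_t uu;

	return rte_uuid_parse("12345678-1234-1234-1234-123456789abc", uu);
}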
* an error on BSD.
*
* @param vfio_group_fd
- * VFIO Grouup FD.
+ * VFIO Group FD.
*
* @return
* 0 on success.
if (mcfg->dma_maskbits &&
rte_mem_check_dma_mask_thread_unsafe(mcfg->dma_maskbits)) {
RTE_LOG(ERR, EAL,
- "%s(): couldnt allocate memory due to IOVA exceeding limits of current DMA mask.\n",
+ "%s(): couldn't allocate memory due to IOVA exceeding limits of current DMA mask.\n",
__func__);
if (rte_eal_iova_mode() == RTE_IOVA_VA &&
rte_eal_using_phys_addrs())
* This operation was still successful, and entry contains a valid update
* RTE_EFD_UPDATE_FAILED
* Either the EFD failed to find a suitable perfect hash or the group was full
- * This is a fatal error, and the table is now in an indeterminite state
+ * This is a fatal error, and the table is now in an indeterminate state
* RTE_EFD_UPDATE_NO_CHANGE
* Operation resulted in no change to the table (same value already exists)
* 0 - success
uint16_t vlan_tci_mask; /**< Bit mask for vlan_tci in big endian */
/** Bit mask for ipv4 flow in big endian. */
struct rte_eth_ipv4_flow ipv4_mask;
- /** Bit maks for ipv6 flow in big endian. */
+ /** Bit mask for ipv6 flow in big endian. */
struct rte_eth_ipv6_flow ipv6_mask;
/** Bit mask for L4 source port in big endian. */
uint16_t src_port_mask;
/*
* A packet can be identified by hardware as different flow types. Different
- * NIC hardwares may support different flow types.
+ * NIC hardware may support different flow types.
* Basically, the NIC hardware identifies the flow type as deep protocol as
* possible, and exclusively. For example, if a packet is identified as
* 'RTE_ETH_FLOW_NONFRAG_IPV4_TCP', it will not be any of other flow types,
/*
* The RSS offload types are defined based on flow types.
- * Different NIC hardwares may support different RSS offload
+ * Different NIC hardware may support different RSS offload
* types. The supported flow types or RSS offload types can be queried by
* rte_eth_dev_info_get().
*/
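A short, hypothetical sketch of the query mentioned above, testing one RSS offload type on a given port:

#include <rte_ethdev.h>

/* Sketch only: check whether the port can hash non-fragmented
 * IPv4/TCP flows. */
static int
port_supports_ipv4_tcp_rss(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;

	rte_eth_dev_info_get(port_id, &dev_info);
	return (dev_info.flow_type_rss_offloads &
		ETH_RSS_NONFRAG_IPV4_TCP) != 0;
}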
* A pointer to an ids array passed by application. This tells which
* statistics values function should retrieve. This parameter
* can be set to NULL if size is 0. In this case function will retrieve
- * all avalible statistics.
+ * all available statistics.
* @param values
* A pointer to a table to be filled with device statistics values.
* @param size
/**
* Request the driver to free mbufs currently cached by the driver. The
* driver will only free the mbuf if it is no longer in use. It is the
- * application's responsibity to ensure rte_eth_tx_buffer_flush(..) is
+ * application's responsibility to ensure rte_eth_tx_buffer_flush(..) is
* called if needed.
*
* @param port_id
typedef void (*eth_dev_infos_get_t)(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
-/**< @internal Get specific informations of an Ethernet device. */
+/**< @internal Get specific information of an Ethernet device. */
typedef const uint32_t *(*eth_dev_supported_ptypes_get_t)(struct rte_eth_dev *dev);
/**< @internal Get supported ptypes of an Ethernet device. */
/**< @internal Get Traffic Management (TM) operations on an Ethernet device */
typedef int (*eth_mtr_ops_get_t)(struct rte_eth_dev *dev, void *ops);
-/**< @internal Get Trafffic Metering and Policing (MTR) operations */
+/**< @internal Get Traffic Metering and Policing (MTR) operations */
typedef int (*eth_get_dcb_info)(struct rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * PMD helper function for cleaing up the resources of a ethdev port on it's
+ * PMD helper function for cleaning up the resources of an ethdev port on its
* destruction.
*
* @param ethdev
uint32_t sched_wfq_weight_max;
/** WRED packet mode support. When non-zero, this parameter indicates
- * that there is atleast one leaf node that supports the WRED packet
+ * that there is at least one leaf node that supports the WRED packet
* mode, which might not be true for all the leaf nodes. In packet
* mode, the WRED thresholds specify the queue length in packets, as
* opposed to bytes.
int cman_wred_packet_mode_supported;
/** WRED byte mode support. When non-zero, this parameter indicates that
- * there is atleast one leaf node that supports the WRED byte mode,
+ * there is at least one leaf node that supports the WRED byte mode,
* which might not be true for all the leaf nodes. In byte mode, the
* WRED thresholds specify the queue length in bytes, as opposed to
* packets.
uint32_t shaper_shared_n_max;
/** WRED packet mode support. When non-zero, this
- * parameter indicates that there is atleast one leaf
+ * parameter indicates that there is at least one leaf
* node on this level that supports the WRED packet
* mode, which might not be true for all the leaf
* nodes. In packet mode, the WRED thresholds specify
int cman_wred_packet_mode_supported;
/** WRED byte mode support. When non-zero, this
- * parameter indicates that there is atleast one leaf
+ * parameter indicates that there is at least one leaf
* node on this level that supports the WRED byte mode,
* which might not be true for all the leaf nodes. In
* byte mode, the WRED thresholds specify the queue
* - rte_event_crypto_adapter_stats_get()
* - rte_event_crypto_adapter_stats_reset()
- * The applicaton creates an instance using rte_event_crypto_adapter_create()
+ * The application creates an instance using rte_event_crypto_adapter_create()
* or rte_event_crypto_adapter_create_ext().
*
* Cryptodev queue pair addition/deletion is done using the
* For SW based packet transfers, i.e., when the
* RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT is not set in the adapter's
* capabilities flags for a particular ethernet device, the service function
- * temporarily enqueues mbufs to an event buffer before batch enqueueing these
+ * temporarily enqueues mbufs to an event buffer before batch enqueuing these
* to the event device. If the buffer fills up, the service function stops
- * dequeueing packets from the ethernet device. The application may want to
+ * dequeuing packets from the ethernet device. The application may want to
* monitor the buffer fill level and instruct the service function to
* selectively buffer packets. The application may also use some other
* criteria to decide which packets should enter the event device even when
*/
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
-/**< Flag indicates HW/SW suports a mechanism to store and retrieve
+/**< Flag indicates HW/SW supports a mechanism to store and retrieve
* the private data information along with the crypto session.
*/
* @see rte_event_port_unlink() to issue unlink requests.
*
* @param dev_id
- * The indentifier of the device.
+ * The identifier of the device.
*
* @param port_id
* Event port identifier to select port to check for unlinks in progress.
* Ethernet device pointer
*
* @param tx_queue_id
- * Transmt queue index
+ * Transmit queue index
*
* @return
* - 0: Success.
struct rte_flow_error *error);
/**
- * Add a flow classify rule to the flow_classifer table.
+ * Add a flow classify rule to the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
struct rte_flow_error *error);
/**
- * Delete a flow classify rule from the flow_classifer table.
+ * Delete a flow classify rule from the flow_classifier table.
*
* @param[in] cls
* Flow classifier handle
/** Flag to support reader writer concurrency */
#define RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY 0x04
-/** Flag to indicate the extendabe bucket table feature should be used */
+/** Flag to indicate the extendable bucket table feature should be used */
#define RTE_HASH_EXTRA_FLAGS_EXT_TABLE 0x08
/** Flag to disable freeing of key index on hash delete.
/**
* This function implements reassembly of fragmented IPv4 packets.
- * Incoming mbufs should have its l2_len/l3_len fields setup correclty.
+ * Incoming mbufs should have their l2_len/l3_len fields set up correctly.
*
* @param tbl
* Table where to lookup/add the fragmented packet.
/*
* Helper routine to copy IV
- * Righ now we support only algorithms with IV length equals 0/8/16 bytes.
+ * Right now we support only algorithms with IV length equal to 0/8/16 bytes.
*/
static inline void
copy_iv(uint64_t dst[IPSEC_MAX_IV_QWORD],
/**
* @file misc.h
- * Contains miscelaneous functions/structures/macros used internally
+ * Contains miscellaneous functions/structures/macros used internally
* by ipsec library.
*/
* @b EXPERIMENTAL: this API may change without prior notice
*
* RTE IPsec support.
- * It is not recommended to include this file direclty,
+ * It is not recommended to include this file directly,
* include <rte_ipsec.h> instead.
* Contains helper functions to process completed crypto-ops
* and group related packets by sessions they belong to.
/**
* Calculate required SA size based on provided input parameters.
* @param prm
- * Parameters that wil be used to initialise SA object.
+ * Parameters that will be used to initialise SA object.
* @return
* - Actual size required for SA with given parameters.
* - -EINVAL if the parameters are invalid.
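The sizing pattern described above can be sketched as below; it assumes rte_ipsec_sa_size() is paired with rte_ipsec_sa_init() from the same library and leaves the rte_ipsec_sa_prm setup to the caller:

#include <rte_ipsec_sa.h>
#include <rte_malloc.h>

/* Sketch only: query the required size, allocate and initialise the
 * SA object, freeing it again on failure. */
static struct rte_ipsec_sa *
sa_alloc(const struct rte_ipsec_sa_prm *prm)
{
	struct rte_ipsec_sa *sa;
	int sz = rte_ipsec_sa_size(prm);

	if (sz < 0)
		return NULL;
	sa = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (sa != NULL && rte_ipsec_sa_init(sa, prm, sz) < 0) {
		rte_free(sa);
		sa = NULL;
	}
	return sa;
}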
* Note: This function pointer is for future flow based latency stats
* implementation.
*
- * Function type used for identifting flow types of a Rx packet.
+ * Function type used for identifying flow types of a Rx packet.
*
* The callback function is called on Rx for each packet.
* This function is used for flow based latency calculations.
* @param hop
* Next hop of the most specific rule found for IP (valid on lookup hit only).
- * This is an 4 elements array of two byte values.
+ * This is a 4-element array of two-byte values.
- * If the lookup was succesfull for the given IP, then least significant byte
+ * If the lookup was successful for the given IP, then least significant byte
* of the corresponding element is the actual next hop and the most
* significant byte is zero.
* If the lookup for the given IP failed, then corresponding element would
/**
* Outer UDP checksum offload flag. This flag is used for enabling
* outer UDP checksum in PMD. To use outer UDP checksum, the user needs to
- * 1) Enable the following in mbuff,
+ * 1) Enable the following in mbuf,
* a) Fill outer_l2_len and outer_l3_len in mbuf.
* b) Set the PKT_TX_OUTER_UDP_CKSUM flag.
* c) Set the PKT_TX_OUTER_IPV4 or PKT_TX_OUTER_IPV6 flag.
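The three mbuf settings listed above amount to something like the following hypothetical snippet (the header lengths are illustrative; an IPv6 outer header would use PKT_TX_OUTER_IPV6 instead):

#include <rte_mbuf.h>

/* Sketch only: request outer UDP checksum offload for a packet with
 * a plain Ethernet + IPv4 outer header. */
static void
request_outer_udp_cksum(struct rte_mbuf *m)
{
	m->outer_l2_len = 14; /* outer Ethernet header */
	m->outer_l3_len = 20; /* outer IPv4 header, no options */
	m->ol_flags |= PKT_TX_OUTER_UDP_CKSUM | PKT_TX_OUTER_IPV4;
}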
}; /**< Hierarchical scheduler */
/**
- * enum for the tx_offload bit-fields lenghts and offsets.
+ * enum for the tx_offload bit-fields lengths and offsets.
* defines the layout of rte_mbuf tx_offload field.
*/
enum {
* The given mbuf must have only one segment.
*
* @param m
- * The packet mbuf to be resetted.
+ * The packet mbuf to be reset.
*/
#define MBUF_INVALID_PORT UINT16_MAX
* ``rte_pktmbuf_detach()``.
*
* Memory for shared data must be provided and user must initialize all of
- * the content properly, escpecially free callback and refcnt. The pointer
+ * the content properly, especially free callback and refcnt. The pointer
* of shared data will be stored in m->shinfo.
* ``rte_pktmbuf_ext_shinfo_init_helper`` can help to simply spare a few
* bytes at the end of buffer for the shared data, store free callback and
*/
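A hypothetical sketch of that helper in use; the external buffer, its IOVA, its length and the free callback are assumed to be supplied by the application:

#include <rte_mbuf.h>

/* Sketch only: carve the shared info out of the tail of ext_buf and
 * attach the external buffer to the mbuf. */
static int
attach_ext_buf(struct rte_mbuf *m, void *ext_buf, rte_iova_t ext_iova,
	       uint16_t ext_len, rte_mbuf_extbuf_free_callback_t free_cb)
{
	uint16_t buf_len = ext_len;
	struct rte_mbuf_ext_shared_info *shinfo;

	shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf, &buf_len,
						    free_cb, NULL);
	if (shinfo == NULL)
		return -1;
	rte_pktmbuf_attach_extbuf(m, ext_buf, ext_iova, buf_len, shinfo);
	return 0;
}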
#define RTE_PTYPE_TUNNEL_ESP 0x00009000
/**
- * L2TP (Layer 2 Tunneling Protocol) tunnleing packet type.
+ * L2TP (Layer 2 Tunneling Protocol) tunneling packet type.
*
* Packet format:
* <'ether type'=0x0800
* @warning
* @b EXPERIMENTAL: this API may change without prior notice.
*
- * Dequeue a number of contiquous object blocks from the external pool.
+ * Dequeue a number of contiguous object blocks from the external pool.
*/
typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp,
void **first_obj_table, unsigned int n);
&cache->objs[cache->len], req);
if (unlikely(ret < 0)) {
/*
- * In the offchance that we are buffer constrained,
+ * On the off chance that we are buffer constrained,
* where we are not able to allocate cache + n, go to
* the ring directly. If that fails, we are truly out of
* buffers.
};
};
-/** QinQ_PPPoE encap paramaeters. */
+/** QinQ_PPPoE encap parameters. */
struct rte_table_encap_ether_qinq_pppoe {
/** Only valid when *type* is set to QinQ. */
uint32_t cur_train_iter;
};
-/* Each Worder Thread Empty Poll Stats */
+/* Each Worker Thread Empty Poll Stats */
struct priority_worker {
/* Current dequeue and throughput counts */
#include <rte_memory.h>
#include <rte_errno.h>
-/* Rawdevice object - essentially a void to be typecasted by implementation */
+/* Rawdevice object - essentially a void to be typecast by implementation */
typedef void *rte_rawdev_obj_t;
/**
* @param dev_id
* Raw device identifiers
* @return
- * 0 for sucessful reset,
+ * 0 for successful reset,
* !0 for failure in resetting
*/
int
* @param dev_id
* The identifier of the device to configure.
* @param buffers
- * Collection of buffers for enqueueing
+ * Collection of buffers for enqueuing
* @param count
* Count of buffers to enqueue
* @param context
* an opaque object representing context of the call; for example, an
* application can pass information about the queues on which enqueue needs
* to be done. Or, the enqueue operation might be passed reference to an
- * object containing a callback (agreed upon between applicatio and driver).
+ * object containing a callback (agreed upon between application and driver).
*
* @return
* >=0 Count of buffers successfully enqueued (0: no buffers enqueued)
rte_rawdev_obj_t version_info);
/**
- * Load firwmare from a buffer (DMA'able)
+ * Load firmware from a buffer (DMA'able)
*
* @param dev
* Raw device pointer
rte_rawdev_obj_t firmware_buf);
/**
- * Unload firwmare
+ * Unload firmware
*
* @param dev
* Raw device pointer
/**< Reset the statistics values in xstats. */
rawdev_xstats_reset_t xstats_reset;
- /**< Obtainer firmware status */
+ /**< Obtain firmware status */
rawdev_firmware_status_get_t firmware_status_get;
/**< Obtain firmware version information */
rawdev_firmware_version_get_t firmware_version_get;
* and return a pointer to it.
*
* @param name
- * Name of the reorder buffer instacne as passed to rte_reorder_create()
+ * Name of the reorder buffer instance as passed to rte_reorder_create()
* @return
* Pointer to reorder buffer instance or NULL if object not found with rte_errno
* set appropriately. Possible rte_errno values include:
* (powerpc/arm).
* There are 2 choices for the users
* 1.use rmb() memory barrier
- * 2.use one-direcion load_acquire/store_release barrier,defined by
+ * 2.use one-direction load_acquire/store_release barrier, defined by
* CONFIG_RTE_USE_C11_MEM_MODEL=y
* It depends on performance test results.
* By default, move common functions to rte_ring_generic.h
* classes of the same subport;
* - When any subport traffic class is oversubscribed
* (configuration time event), the usage of subport member
- * pipes with high demand for thattraffic class pipes is
+ * pipes with high demand for that traffic class is
* truncated to a dynamically adjusted value with no
* impact to low demand pipes;
* 3. Pipe:
enum rte_security_pdcp_domain domain;
/**< PDCP mode of operation: Control or data */
uint32_t capa_flags;
- /**< Capabilitity flags, see RTE_SECURITY_PDCP_* */
+ /**< Capability flags, see RTE_SECURITY_PDCP_* */
} pdcp;
/**< PDCP capability */
};
#define RTE_SECURITY_TX_HW_TRAILER_OFFLOAD 0x00000002
/**< HW constructs trailer of packets
* Transmitted packets will have the trailer added to them
- * by hardawre. The next protocol field will be based on
+ * by hardware. The next protocol field will be based on
* the mbuf->inner_esp_next_proto field.
*/
#define RTE_SECURITY_RX_HW_TRAILER_OFFLOAD 0x00010000
* be picked and dropped, the most likely candidate for drop, i.e. the
* current LRU key, is always picked. The LRU logic requires maintaining
* specific data structures per each bucket. Use-cases: flow cache, etc.
- * b. Extendible bucket (ext): The bucket is extended with space for 4 more
+ * b. Extendable bucket (ext): The bucket is extended with space for 4 more
* keys. This is done by allocating additional memory at table init time,
* which is used to create a pool of free keys (the size of this pool is
* configurable and always a multiple of 4). On key add operation, the
* current bucket is in extended state and a match is not found in the
* first group of 4 keys, the search continues beyond the first group of
* 4 keys, potentially until all keys in this bucket are examined. The
- * extendible bucket logic requires maintaining specific data structures
+ * extendable bucket logic requires maintaining specific data structures
* per table and per each bucket. Use-cases: flow table, etc.
* 2. Key size:
* a. Configurable key size
uint64_t seed;
};
-/** Extendible bucket hash table operations */
+/** Extendable bucket hash table operations */
extern struct rte_table_ops rte_table_hash_ext_ops;
extern struct rte_table_ops rte_table_hash_key8_ext_ops;
extern struct rte_table_ops rte_table_hash_key16_ext_ops;
* virtio queue index
*
* @return
- * num of avail entires left
+ * num of avail entries left
*/
uint16_t rte_vhost_avail_entries(int vid, uint16_t queue_id);
/**
* Get guest mem table: a list of memory regions.
*
- * An rte_vhost_vhost_memory object will be allocated internaly, to hold the
+ * An rte_vhost_vhost_memory object will be allocated internally, to hold the
* guest memory regions. Application should free it at destroy_device()
* callback.
*