/*
* Uncommenting the next line will cause the find_match
- * function to be optimised out, making this function
+ * function to be optimized out, making this function
* do parallel (non-atomic) distribution
*/
/* matches[j] = 0; */
/*
* Return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog.
+ * being worked on or queued up in a backlog.
*/
static inline unsigned int
total_outstanding(const struct rte_distributor *d)
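For reference, the counting such a helper performs might look like the sketch below; the in_flight and backlog[].count fields are illustrative assumptions, not the library's actual layout.

    /* sketch only: field names are hypothetical */
    static inline unsigned int
    total_outstanding_sketch(const struct rte_distributor *d)
    {
        unsigned int wkr, total = d->in_flight;     /* handed to workers */

        for (wkr = 0; wkr < d->num_workers; wkr++)
            total += d->backlog[wkr].count;         /* queued per worker */
        return total;
    }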
#endif
/*
- * Set up the backog tags so they're pointing at the second cache
+ * Set up the backlog tags so they're pointing at the second cache
* line for performance during flow matching
*/
for (i = 0 ; i < num_workers ; i++)
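The loop body (truncated by this hunk) plausibly points each per-worker tag array one burst past the in-flight tags, along these lines (field names assumed for illustration):

    d->backlog[i].tags = &d->in_flight_tags[i][RTE_DIST_BURST_SIZE];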
* @param alg_type
* Call the legacy API, or use the new burst API. legacy uses 32-bit
* flow ID, and works on a single packet at a time. Latest uses 15-
- * bit flow ID and works on up to 8 packets at a time to worers.
+ * bit flow ID and sends bursts of up to 8 packets at a time to workers.
* @return
* The newly created distributor instance
*/
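A burst-mode call might look like this sketch, built on the rte_distributor_create() signature documented here (the name and worker count are arbitrary):

    struct rte_distributor *d;

    d = rte_distributor_create("pkt_dist", rte_socket_id(),
            4 /* num_workers */, RTE_DIST_ALG_BURST);
    if (d == NULL)
        rte_exit(EXIT_FAILURE, "cannot create distributor\n");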
/*
* Transfer up to 8 mbufs at a time to/from workers, and
- * flow matching algorithm optimised for 8 flow IDs at a time
+ * flow matching algorithm optimized for 8 flow IDs at a time
*/
#define RTE_DIST_BURST_SIZE 8
VERSION_SYMBOL(rte_distributor_returned_pkts, _v20, 2.0);
/* return the number of packets in-flight in a distributor, i.e. packets
- * being workered on or queued up in a backlog. */
+ * being worked on or queued up in a backlog.
+ */
static inline unsigned
total_outstanding(const struct rte_distributor_v20 *d)
{
{RTE_LOGTYPE_USER8, "user8"}
};
-/* Logging should be first initialzer (before drivers and bus) */
+/* Logging should be first initializer (before drivers and bus) */
RTE_INIT_PRIO(rte_log_init, 101);
static void
rte_log_init(void)
* We split the remaining bytes (which will be less than 256) into
- * 64byte (2^6) chunks.
+ * 64-byte (2^6) chunks.
* Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
* integers, we shift the 2 relevant bits to the LSB position to first
* get decrementing integers, and then subtract.
*/
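To make the trick concrete, here is a hedged sketch (copy64() is a hypothetical 64-byte copy helper, not the real implementation): since n < 256, n >> 6 takes the decrementing values 3..0, and 3 - (n >> 6) yields the dense incrementing labels 0..3 that invite a jump table.

    switch (3 - (n >> 6)) {
    case 0:
        copy64(dst, src); dst += 64; src += 64; /* fall through */
    case 1:
        copy64(dst, src); dst += 64; src += 64; /* fall through */
    case 2:
        copy64(dst, src); dst += 64; src += 64;
    default:        /* fewer than 64 bytes left */
        break;
    }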
* We split the remaining bytes (which will be less than 256) into
- * 64byte (2^6) chunks.
+ * 64-byte (2^6) chunks.
* Using incrementing integers in the case labels of a switch statement
- * enourages the compiler to use a jump table. To get incrementing
+ * encourages the compiler to use a jump table. To get incrementing
* integers, we shift the 2 relevant bits to the LSB position to first
* get decrementing integers, and then subtract.
*/
/**
* Usage function typedef used by the application usage function.
*
- * Use this function typedef to define and call rte_set_applcation_usage_hook()
+ * Use this function typedef to define and call rte_set_application_usage_hook()
* routine.
*/
typedef void (*rte_usage_hook_t)(const char * prgname);
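Typical use pairs this typedef with rte_set_application_usage_hook(); a minimal sketch (the hook body is arbitrary, <stdio.h> assumed):

    static void
    my_usage(const char *prgname)   /* matches rte_usage_hook_t */
    {
        printf("Usage: %s [EAL options] -- [app options]\n", prgname);
    }

    /* install before rte_eal_init() parses the command line */
    rte_set_application_usage_hook(my_usage);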
* The string identifying the log type.
* @return
* - >0: success, the returned value is the log type identifier.
- * - (-ENONEM): cannot allocate memory.
+ * - (-ENOMEM): cannot allocate memory.
*/
int rte_log_register(const char *name);
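A usage sketch of the contract above (the type name is arbitrary):

    int my_logtype = rte_log_register("myapp.pktio");

    if (my_logtype < 0)     /* e.g. -ENOMEM */
        rte_panic("cannot register log type\n");
    rte_log_set_level(my_logtype, RTE_LOG_DEBUG);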
}
/*
- * joing two struct malloc_elem together. elem1 and elem2 must
+ * join two struct malloc_elem together. elem1 and elem2 must
* be contiguous in memory.
*/
static inline void
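The join itself is plausibly along these lines; the size and prev fields, and the assumption that the element after elem2 starts at elem2 + size, are illustrative, not the allocator's actual layout:

    static inline void
    join_elem(struct malloc_elem *elem1, struct malloc_elem *elem2)
    {
        struct malloc_elem *next = RTE_PTR_ADD(elem2, elem2->size);

        elem1->size += elem2->size;     /* elem1 absorbs elem2 */
        next->prev = elem1;             /* re-point the following element */
    }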
service = &rte_services[id]; \
} while (0)
-/* returns 1 if statistics should be colleced for service
+/* Returns 1 if statistics should be collected for service
* Returns 0 if statistics should not be collected for service
*/
static inline int
* hugetlbfs, then mmap() hugepage_sz data in it. If orig is set, the
* virtual address is stored in hugepg_tbl[i].orig_va, else it is stored
* in hugepg_tbl[i].final_va. The second mapping (when orig is 0) tries to
- * map continguous physical blocks in contiguous virtual blocks.
+ * map contiguous physical blocks in contiguous virtual blocks.
*/
static unsigned
map_all_hugepages(struct hugepage_file *hugepg_tbl, struct hugepage_info *hpi,
/*
* This function runs on a specific thread to update a global variable
- * containing used to process MSB of the HPET (unfortunatelly, we need
+ * used to track the MSB of the HPET (unfortunately, we need
* this because hpet is 32 bits by default under linux).
*/
static void
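The mechanism can be sketched as follows; hpet_read32() and the globals are hypothetical stand-ins for the platform specifics:

    static uint32_t hpet_read32(void);      /* platform-specific, assumed */
    static volatile uint32_t hpet_msb;      /* software extension of the counter */

    static void *
    hpet_msb_thread(void *arg)
    {
        uint32_t prev = hpet_read32();

        for (;;) {
            uint32_t cur = hpet_read32();

            if (cur < prev)         /* 32-bit counter wrapped */
                hpet_msb++;
            prev = cur;
            usleep(100 * 1000);     /* poll well within one wrap period */
        }
        return NULL;
    }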
else
phy->ops.get_cable_length = e1000_get_cable_length_m88;
phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_m88;
- /* Check if this PHY is confgured for media swap. */
+ /* Check if this PHY is configured for media swap. */
if (phy->id == M88E1112_E_PHY_ID) {
u16 data;
* This operation was still successful, and entry contains a valid update
* RTE_EFD_UPDATE_FAILED
* Either the EFD failed to find a suitable perfect hash or the group was full
- * This is a fatal error, and the table is now in an indeterminite state
+ * This is a fatal error, and the table is now in an indeterminate state
* RTE_EFD_UPDATE_NO_CHANGE
* Operation resulted in no change to the table (same value already exists)
* 0
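Illustrative handling of these return codes (table, key, and value setup omitted; rte_efd_update() is the documented entry point):

    int st = rte_efd_update(table, rte_socket_id(), &key, value);

    if (st == RTE_EFD_UPDATE_FAILED) {
        /* fatal: table contents are now indeterminate */
    } else {
        /* 0, RTE_EFD_UPDATE_NO_CHANGE, or a warning: entry is usable */
    }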
/**
* Ethernet device TX queue information structure.
- * Used to retieve information about configured queue.
+ * Used to retrieve information about configured queue.
*/
struct rte_eth_txq_info {
struct rte_eth_txconf conf; /**< queue config parameters. */
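For example, the structure is filled in by rte_eth_tx_queue_info_get():

    struct rte_eth_txq_info qinfo;

    if (rte_eth_tx_queue_info_get(port_id, queue_id, &qinfo) == 0)
        printf("TX ring size: %u\n", qinfo.nb_desc);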
typedef int (*rte_tm_node_wfq_weight_mode_update_t)(
struct rte_eth_dev *dev,
uint32_t node_id,
- int *wfq_weigth_mode,
+ int *wfq_weight_mode,
uint32_t n_sp_priorities,
struct rte_tm_error *error);
* This function creates a TCP/IPv4 reassembly table.
*
* @param socket_id
- * socket index for allocating TCP/IPv4 reassemblt table
+ * socket index for allocating the TCP/IPv4 reassembly table
* @param max_flow_num
* the maximum number of flows in the TCP/IPv4 GRO table
* @param max_item_per_flow
* Before calling rte_gso_segment(), applications must set proper ol_flags
* for the packet. The GSO library uses the same macros as that of TSO.
* For example, set PKT_TX_TCP_SEG and PKT_TX_IPV4 in ol_flags to segment
- * a TCP/IPv4 packet. If rte_gso_segment() succceds, the PKT_TX_TCP_SEG
+ * a TCP/IPv4 packet. If rte_gso_segment() succeeds, the PKT_TX_TCP_SEG
* flag is removed for all GSO segments and the input packet.
*
* Each of the newly-created GSO segments is organized as a two-segment
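A hedged sketch of the flow just described (gso_ctx setup and MAX_SEGS are application-side assumptions):

    struct rte_mbuf *segs[MAX_SEGS];
    int nb;

    pkt->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4;
    nb = rte_gso_segment(pkt, &gso_ctx, segs, RTE_DIM(segs));
    if (nb < 0) {
        /* segmentation failed; pkt is unchanged */
    }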
}
/*
- * errorneous packet: either exceeed max allowed number of fragments,
+ * erroneous packet: either exceeded max allowed number of fragments,
* or duplicate first/last fragment encountered.
*/
if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
struct rte_mbuf *mb; /**< fragment mbuf */
};
-/** @internal <src addr, dst_addr, id> to uniquely indetify fragmented datagram. */
+/** @internal <src addr, dst_addr, id> to uniquely identify fragmented datagram. */
struct ip_frag_key {
-uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */
-uint32_t id; /**< dst address */
+uint64_t src_dst[4]; /**< src and dst address, first 8 bytes used for IPv4 */
+uint32_t id; /**< fragment (IP datagram) ID */
uint32_t entry_mask; /**< hash value mask. */
uint32_t max_entries; /**< max entries allowed. */
uint32_t use_entries; /**< entries in use. */
- uint32_t bucket_entries; /**< hash assocaitivity. */
+ uint32_t bucket_entries; /**< hash associativity. */
uint32_t nb_entries; /**< total size of the table. */
uint32_t nb_buckets; /**< num of associativity lines. */
struct ip_frag_pkt *last; /**< last used entry. */
* @param ip_hdr
* Pointer to the IPV4 header inside the fragment.
* @return
- * Pointer to mbuf for reassebled packet, or NULL if:
+ * Pointer to mbuf for reassembled packet, or NULL if:
* - an error occurred.
* - not all fragments of the packet are collected yet.
*/
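An illustrative call honoring this contract (tbl, death_row, and the timestamp source are setup left out here):

    struct rte_mbuf *mo = rte_ipv4_frag_reassemble_packet(tbl, &death_row,
            mb, rte_rdtsc(), ip_hdr);

    if (mo == NULL)
        return;     /* error, or more fragments still expected */
    /* mo now points at the reassembled packet */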
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
- /* update ipv4 header for the reassmebled packet */
+ /* update ipv4 header for the reassembled packet */
ip_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
* @param ip_hdr
* Pointer to the IPV4 header inside the fragment.
* @return
- * Pointer to mbuf for reassebled packet, or NULL if:
+ * Pointer to mbuf for reassembled packet, or NULL if:
* - an error occurred.
* - not all fragments of the packet are collected yet.
*/
*
* @param job
* Job object.
- * @param update_pedriod_cb
+ * @param update_period_cb
- * Callback to set. If NULL restore default update function.
+ * Callback to set. If NULL, restore the default update function.
*/
void
/* Get an available slot from the pool */
slot = kni_memzone_pool_alloc();
if (!slot) {
- RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces(current %d) or release unusued ones.\n",
+ RTE_LOG(ERR, KNI, "Cannot allocate more KNI interfaces; increase the number of max_kni_ifaces (current %d) or release unused ones.\n",
kni_memzone_pool.max_ifaces);
return NULL;
}
phys[i] = va2pa(pkts[i]);
}
- /* No pkt mbuf alocated */
+ /* No pkt mbuf allocated */
if (i <= 0)
return;
* @param kni
* pointer to struct rte_kni.
* @param ops
- * ponter to struct rte_kni_ops.
+ * pointer to struct rte_kni_ops.
*
* @return
* On success: 0
}
/**
- * Get up to num elements from the fifo. Return the number actully read
+ * Get up to num elements from the fifo. Return the number actually read
*/
static inline unsigned
kni_fifo_get(struct rte_kni_fifo *fifo, void **data, unsigned num)
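A plausible shape for such a get, sketched under the assumption of a power-of-two ring with read/write indices (field names assumed):

    static inline unsigned
    kni_fifo_get_sketch(struct rte_kni_fifo *fifo, void **data, unsigned num)
    {
        unsigned i, read = fifo->read;
        const unsigned write = fifo->write;

        for (i = 0; i < num; i++) {
            if (read == write)
                break;                          /* fifo empty */
            data[i] = fifo->buffer[read];
            read = (read + 1) & (fifo->len - 1);  /* len is a power of two */
        }
        fifo->read = read;
        return i;       /* number actually read */
    }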
} while (0)
/**
- * Allocate an unitialized mbuf from mempool *mp*.
+ * Allocate an uninitialized mbuf from mempool *mp*.
*
* This function can be used by PMDs (especially in RX functions) to
- * allocate an unitialized mbuf. The driver is responsible of
+ * allocate an uninitialized mbuf. The driver is responsible for
* initializing all the required fields. See rte_pktmbuf_reset().
* For standard needs, prefer rte_pktmbuf_alloc().
*
* @param len
* The amount of bytes to read.
* @param buf
- * The buffer where data is copied if it is not contigous in mbuf
+ * The buffer where data is copied if it is not contiguous in mbuf
* data. Its length should be at least equal to the len parameter.
* @return
* The pointer to the data, either in the mbuf if it is contiguous,
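For instance, reading a header that may straddle mbuf segments (struct tcp_hdr is this DPDK era's name; the offset is illustrative):

    struct tcp_hdr hdr_copy;
    const struct tcp_hdr *th = rte_pktmbuf_read(m, m->l2_len + m->l3_len,
            sizeof(hdr_copy), &hdr_copy);

    if (th == NULL) {
        /* mbuf holds fewer than len bytes past the offset */
    }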
* FOLD = XOR(T1, T2, DATA)
*
* @param data_block 16 byte data block
- * @param precomp precomputed rk1 constanst
+ * @param precomp precomputed rk1 constant
* @param fold running 16 byte folded data
*
* @return New 16 byte folded data
* @param data_block
* 16 byte data block
* @param precomp
- * Precomputed rk1 constanst
+ * Precomputed rk1 constant
* @param fold
- * Current16 byte folded data
+ * Current 16 byte folded data
*
* @param off
* The offset in bytes to start the checksum.
* @param len
- * The length in bytes of the data to ckecksum.
+ * The length in bytes of the data to checksum.
* @param cksum
* A pointer to the checksum, filled on success.
* @return
}
static int
-pdump_regitser_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+pdump_register_rx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
}
static int
-pdump_regitser_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
+pdump_register_tx_callbacks(uint16_t end_q, uint16_t port, uint16_t queue,
struct rte_ring *ring, struct rte_mempool *mp,
uint16_t operation)
{
/* register RX callback */
if (flags & RTE_PDUMP_FLAG_RX) {
end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_rx_q : queue + 1;
- ret = pdump_regitser_rx_callbacks(end_q, port, queue, ring, mp,
+ ret = pdump_register_rx_callbacks(end_q, port, queue, ring, mp,
operation);
if (ret < 0)
return ret;
/* register TX callback */
if (flags & RTE_PDUMP_FLAG_TX) {
end_q = (queue == RTE_PDUMP_ALL_QUEUES) ? nb_tx_q : queue + 1;
- ret = pdump_regitser_tx_callbacks(end_q, port, queue, ring, mp,
+ ret = pdump_register_tx_callbacks(end_q, port, queue, ring, mp,
operation);
if (ret < 0)
return ret;
* @param keys
* Array containing table entry keys
* @param entries
- * Array containung new contents for every table entry identified by key
+ * Array containing new contents for every table entry identified by key
* @param n_keys
* Number of keys to add
* @param key_found
}
ret = 0;
- POWER_DEBUG_TRACE("%d frequencie(s) of lcore %u are available\n",
+ POWER_DEBUG_TRACE("%d frequencies of lcore %u are available\n",
count, pi->lcore_id);
out:
fclose(f);
}
RTE_LOG(INFO, POWER, "Initialized successfully for lcore %u "
- "power manamgement\n", lcore_id);
+ "power management\n", lcore_id);
rte_atomic32_cmpset(&(pi->state), POWER_ONGOING, POWER_USED);
return 0;
*
* @return
* - 1 on success with frequency changed.
- * - 0 on success without frequency chnaged.
+ * - 0 on success without frequency changed.
* - Negative on error.
*/
int rte_power_acpi_cpufreq_freq_min(unsigned lcore_id);
* -1 on error
* On error case, rte_errno will be set appropriately:
* - ENOSPC - Cannot move existing mbufs from reorder buffer to accommodate
- * ealry mbuf, but it can be accommodated by performing drain and then insert.
+ * early mbuf, but it can be accommodated by performing drain and then insert.
* - ERANGE - Too early or late mbuf which is vastly out of range of expected
- * window should be ingnored without any handling.
+ * window should be ignored without any handling.
*/
int
rte_reorder_insert(struct rte_reorder_buffer *b, struct rte_mbuf *mbuf);
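Illustrative handling of the two error cases spelled out above (drain buffer sizing is application-chosen):

    if (rte_reorder_insert(b, m) < 0) {
        if (rte_errno == ENOSPC) {
            /* drain ready mbufs to make room, then retry the insert */
            unsigned n = rte_reorder_drain(b, drain_buf, RTE_DIM(drain_buf));
            /* ... forward the n drained mbufs, re-insert m ... */
        } else if (rte_errno == ERANGE) {
            rte_pktmbuf_free(m);    /* vastly out of window: drop */
        }
    }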
/**
* @brief Generate random number for RED
*
- * Implemenetation based on:
+ * Implementation based on:
* http://software.intel.com/en-us/articles/fast-random-number-generator-on-the-intel-pentiumr-4-processor/
*
* 10 bit shift has been found through empirical tests (was 16).
* Now using basic math we compute 2^n:
* 2^(f+n) = 2^f * 2^n
* 2^f - we use lookup table
- * 2^n - can be replaced with bit shift right oeprations
+ * 2^n - can be replaced with bit shift right operations
*/
f = (n >> 6) & 0xf;
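A self-contained demonstration of the identity the comment leans on, 2^(k+f) = 2^k * 2^f, with a 16-entry table for the fractional part (plain doubles here; the library works in fixed point):

    #include <stdio.h>
    #include <math.h>

    int main(void)
    {
        double frac_lut[16];        /* the "2^f" lookup table, f = i/16 */
        int i, k = 3, idx = 7;      /* evaluate 2^(3 + 7/16) */

        for (i = 0; i < 16; i++)
            frac_lut[i] = pow(2.0, i / 16.0);
        printf("%f == %f\n", (double)(1 << k) * frac_lut[idx],
                pow(2.0, k + idx / 16.0));
        return 0;
    }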
memcpy(stats, &s->stats, sizeof(struct rte_sched_subport_stats));
memset(&s->stats, 0, sizeof(struct rte_sched_subport_stats));
- /* Subport TC ovesubscription status */
+ /* Subport TC oversubscription status */
*tc_ov = s->tc_ov;
return 0;
/*
* Return a skiplist level for a new entry.
- * This probabalistically gives a level with p=1/4 that an entry at level n
+ * This probabilistically gives a level such that, with p=1/4, an entry at level n
* will also appear at level n+1.
*/
static uint32_t
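Such a generator is commonly written as below; this is a sketch, with MAX_LEVEL hypothetical and rte_rand() used for the random bits:

    static uint32_t
    skiplist_random_level(void)
    {
        uint32_t level = 0;

        /* two random bits per step: promotion happens with p = 1/4 */
        while (level < MAX_LEVEL - 1 && (rte_rand() & 3) == 0)
            level++;
        return level;
    }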