* block.
*
* RAMROD_CMD_ID_ETH_UPDATE
- * Used to update the state of the leading connection, usually to udpate
+ * Used to update the state of the leading connection, usually to update
* the RSS indirection table. Completes on the RCQ of the leading
* connection. (Not currently used under FreeBSD until OS support becomes
* available.)
* the RCQ of the leading connection.
*
* RAMROD_CMD_ID_ETH_CFC_DEL
- * Used when tearing down a conneciton prior to driver unload. Completes
+ * Used when tearing down a connection prior to driver unload. Completes
* on the RCQ of the leading connection (since the current connection
* has been completely removed from controller memory).
*
/*
* It's ok if the actual decrement is issued towards the memory
- * somewhere between the lock and unlock. Thus no more explict
+ * somewhere between the lock and unlock. Thus no more explicit
* memory barrier is needed.
*/
if (common) {
break;
case (RAMROD_CMD_ID_ETH_TERMINATE):
- PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] teminate ramrod", cid);
+ PMD_DRV_LOG(DEBUG, sc, "got MULTI[%d] terminate ramrod", cid);
drv_cmd = ECORE_Q_CMD_TERMINATE;
break;
case BNX2X_RX_MODE_ALLMULTI_PROMISC:
case BNX2X_RX_MODE_PROMISC:
/*
- * According to deffinition of SI mode, iface in promisc mode
+ * According to definition of SI mode, iface in promisc mode
* should receive matched and unmatched (in resolution of port)
* unicast packets.
*/
/*
* Cleans the object that have internal lists without sending
- * ramrods. Should be run when interrutps are disabled.
+ * ramrods. Should be run when interrupts are disabled.
*/
static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
{
/*
* Nothing to do during unload if previous bnx2x_nic_load()
- * did not completed successfully - all resourses are released.
+ * did not complete successfully - all resources are released.
*/
if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
return 0;
/*
* Prevent transactions to host from the functions on the
* engine that doesn't reset global blocks in case of global
- * attention once gloabl blocks are reset and gates are opened
+ * attention once global blocks are reset and gates are opened
 * (the engine whose leader will perform the recovery
* last).
*/
/*
* At this stage no more interrupts will arrive so we may safely clean
- * the queue'able objects here in case they failed to get cleaned so far.
+ * the queueable objects here in case they failed to get cleaned so far.
*/
if (IS_PF(sc)) {
bnx2x_squeeze_objects(sc);
}
/*
- * Encapsulte an mbuf cluster into the tx bd chain and makes the memory
+ * Encapsulates an mbuf cluster into the Tx BD chain and makes the memory
* visible to the controller.
*
* If an mbuf is submitted to this routine and cannot be given to the
tx_start_bd->nbd = rte_cpu_to_le_16(2);
- if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+ if (m0->ol_flags & RTE_MBUF_F_TX_VLAN) {
tx_start_bd->vlan_or_ethertype =
rte_cpu_to_le_16(m0->vlan_tci);
tx_start_bd->bd_flags.as_bitfield |=
tx_parse_bd =
&txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
- if (rte_is_multicast_ether_addr(&eh->d_addr)) {
- if (rte_is_broadcast_ether_addr(&eh->d_addr))
+ if (rte_is_multicast_ether_addr(&eh->dst_addr)) {
+ if (rte_is_broadcast_ether_addr(&eh->dst_addr))
mac_type = BROADCAST_ADDRESS;
else
mac_type = MULTICAST_ADDRESS;
(mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
- &eh->d_addr.addr_bytes[0], 2);
+ &eh->dst_addr.addr_bytes[0], 2);
rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
- &eh->d_addr.addr_bytes[2], 2);
+ &eh->dst_addr.addr_bytes[2], 2);
rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
- &eh->d_addr.addr_bytes[4], 2);
+ &eh->dst_addr.addr_bytes[4], 2);
rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
- &eh->s_addr.addr_bytes[0], 2);
+ &eh->src_addr.addr_bytes[0], 2);
rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
- &eh->s_addr.addr_bytes[2], 2);
+ &eh->src_addr.addr_bytes[2], 2);
rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
- &eh->s_addr.addr_bytes[4], 2);
+ &eh->src_addr.addr_bytes[4], 2);
tx_parse_bd->data.mac_addr.dst_hi =
rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
return val1 != 0;
}
-/* send load requrest to mcp and analyze response */
+/* send load request to MCP and analyze response */
static int bnx2x_nic_load_request(struct bnx2x_softc *sc, uint32_t * load_code)
{
PMD_INIT_FUNC_TRACE(sc);
* sum of vn_min_rates.
* or
* 0 - if all the min_rates are 0.
- * In the later case fainess algorithm should be deactivated.
+ * In the latter case the fairness algorithm should be deactivated.
* If all min rates are not zero then those that are zeroes will be set to 1.
*/
static void bnx2x_calc_vn_min(struct bnx2x_softc *sc, struct cmng_init_input *input)
txq_init->fw_sb_id = fp->fw_sb_id;
/*
- * set the TSS leading client id for TX classfication to the
+ * set the TSS leading client id for Tx classification to the
* leading RSS client id
*/
txq_init->tss_leading_cl_id = BNX2X_FP(sc, 0, cl_id);
}
/*
-* Walk the PCI capabiites list for the device to find what features are
-* supported. These capabilites may be enabled/disabled by firmware so it's
+* Walk the PCI capabilities list for the device to find what features are
+* supported. These capabilities may be enabled/disabled by firmware so it's
* best to walk the list rather than make assumptions.
*/
static void bnx2x_probe_pci_caps(struct bnx2x_softc *sc)
} else {
sc->devinfo.int_block = INT_BLOCK_IGU;
-/* do not allow device reset during IGU info preocessing */
+/* do not allow device reset during IGU info processing */
bnx2x_acquire_hw_lock(sc, HW_LOCK_RESOURCE_RESET);
val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
- /* get PCI capabilites */
+ /* get PCI capabilities */
bnx2x_probe_pci_caps(sc);
if (sc->devinfo.pcie_msix_cap_reg != 0) {
* stay set)
* f. If this is VNIC 3 of a port then also init
* first_timers_ilt_entry to zero and last_timers_ilt_entry
- * to the last enrty in the ILT.
+ * to the last entry in the ILT.
*
* Notes:
* Currently the PF error in the PGLC is non recoverable.
/**
* bnx2x_pf_flr_clnup
* a. re-enable target read on the PF
- * b. poll cfc per function usgae counter
+ * b. poll cfc per function usage counter
 * c. poll the qm per function usage counter
* d. poll the tm per function usage counter
* e. poll the tm per function scan-done indication