net/qede/base: improve Tx-switching performance
diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c
index b1190e4..22bb43d 100644
--- a/drivers/net/qede/base/ecore_l2.c
+++ b/drivers/net/qede/base/ecore_l2.c
@@ -36,9 +36,9 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
+       u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
-       u16 rx_mode = 0;
 
        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
@@ -71,6 +71,30 @@ ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
 
        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);
 
+       /* Handle requests for strict behavior on transmission errors */
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
+                 p_params->b_err_illegal_vlan_mode ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
+                 p_params->b_err_small_pkt ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
+                 p_params->b_err_anti_spoof ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
+                 p_params->b_err_illegal_inband_mode ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
+                 p_params->b_err_vlan_insert_with_inband ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
+                 p_params->b_err_big_pkt ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
+                 p_params->b_err_ctrl_frame ?
+                 ETH_TX_ERR_ASSERT_MALICIOUS : 0);
+       p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);
+
        /* TPA related fields */
        OSAL_MEMSET(&p_ramrod->tpa_param, 0,
                    sizeof(struct eth_vport_tpa_param));
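
The block above requests strict firmware behavior per Tx error class: each SET_FIELD() picks ETH_TX_ERR_ASSERT_MALICIOUS when the caller set the corresponding b_err_* flag, and otherwise leaves the firmware default (0). A minimal standalone sketch of that mask/shift pattern follows; the *_MASK/*_SHIFT values are placeholders, not the real ecore_hsi layout of the tx_err_behav field.

    /*
     * Standalone sketch of the SET_FIELD pattern used above. The
     * *_MASK/*_SHIFT values are placeholders, not the real
     * ecore_hsi_eth.h definitions.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_TX_ERR_ASSERT_MALICIOUS    1   /* treat offender as malicious */

    /* hypothetical layout: one bit per error class */
    #define TX_ERR_ILLEGAL_VLAN_MODE_MASK  0x1
    #define TX_ERR_ILLEGAL_VLAN_MODE_SHIFT 0
    #define TX_ERR_ANTI_SPOOFING_ERR_MASK  0x1
    #define TX_ERR_ANTI_SPOOFING_ERR_SHIFT 2

    /* clear the field, then set it to the requested value */
    #define SET_FIELD(value, name, flag)                                   \
            do {                                                           \
                    (value) &= ~((name##_MASK) << (name##_SHIFT));         \
                    (value) |= ((flag) & (name##_MASK)) << (name##_SHIFT); \
            } while (0)

    int main(void)
    {
            uint16_t tx_err = 0;
            int b_err_anti_spoof = 1;  /* caller asked for strict behavior */

            SET_FIELD(tx_err, TX_ERR_ILLEGAL_VLAN_MODE, 0);
            SET_FIELD(tx_err, TX_ERR_ANTI_SPOOFING_ERR,
                      b_err_anti_spoof ? ETH_TX_ERR_ASSERT_MALICIOUS : 0);

            printf("tx_err_behav.values = 0x%04x\n", tx_err);  /* 0x0004 */
            return 0;
    }
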
@@ -427,6 +451,11 @@ ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
                                      p_params->sge_tpa_params);
+       if (p_params->mtu) {
+               p_ramrod->common.update_mtu_flg = 1;
+               p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
+       }
+
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
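
In the vport-update hunk above, a zero MTU means "do not touch the current MTU"; only a non-zero value sets update_mtu_flg in the ramrod. A hedged caller-side sketch, where the struct name and every member except mtu are stand-ins for the real update-params structure:

    /* Stand-in for the real vport-update params; only "mtu" mirrors the
     * member read by ecore_sp_vport_update() above. */
    struct vport_update_req {
            unsigned char vport_id;
            unsigned short mtu;     /* 0 = leave the current MTU unchanged */
    };

    static void request_mtu_update(struct vport_update_req *req,
                                   unsigned short new_mtu)
    {
            /* A non-zero value makes the ramrod carry update_mtu_flg = 1
             * together with the new MTU in little-endian form. */
            req->mtu = new_mtu;
    }
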
 
@@ -543,12 +572,7 @@ enum _ecore_status_t
 ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                              u16 opaque_fid,
                              u32 cid,
-                             u16 rx_queue_id,
-                             u8 vf_rx_queue_id,
-                             u8 vport_id,
-                             u8 stats_id,
-                             u16 sb,
-                             u8 sb_index,
+                             struct ecore_queue_start_common_params *p_params,
                              u16 bd_max_bytes,
                              dma_addr_t bd_chain_phys_addr,
                              dma_addr_t cqe_pbl_addr,
@@ -563,22 +587,23 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
        enum _ecore_status_t rc = ECORE_NOTIMPL;
 
        /* Store information for the stop */
-       p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+       p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
        p_rx_cid->cid = cid;
        p_rx_cid->opaque_fid = opaque_fid;
-       p_rx_cid->vport_id = vport_id;
+       p_rx_cid->vport_id = p_params->vport_id;
 
-       rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id);
+       rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_rx_q_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-                  opaque_fid, cid, rx_queue_id, vport_id, sb);
+                  opaque_fid, cid, p_params->queue_id,
+                  p_params->vport_id, p_params->sb);
 
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -594,10 +619,10 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
 
        p_ramrod = &p_ent->ramrod.rx_queue_start;
 
-       p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
-       p_ramrod->sb_index = sb_index;
+       p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+       p_ramrod->sb_index = (u8)p_params->sb_idx;
        p_ramrod->vport_id = abs_vport_id;
-       p_ramrod->stats_counter_id = stats_id;
+       p_ramrod->stats_counter_id = p_params->stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;
@@ -608,30 +633,27 @@ ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
 
-       if (vf_rx_queue_id || b_use_zone_a_prod) {
-               p_ramrod->vf_rx_prod_index = vf_rx_queue_id;
+       if (p_params->vf_qid || b_use_zone_a_prod) {
+               p_ramrod->vf_rx_prod_index = (u8)p_params->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           b_use_zone_a_prod ? " [legacy]" : "",
-                          vf_rx_queue_id);
+                          p_params->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = b_use_zone_a_prod;
        }
 
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
-enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
-                                                u16 opaque_fid,
-                                                u8 rx_queue_id,
-                                                u8 vport_id,
-                                                u8 stats_id,
-                                                u16 sb,
-                                                u8 sb_index,
-                                                u16 bd_max_bytes,
-                                                dma_addr_t bd_chain_phys_addr,
-                                                dma_addr_t cqe_pbl_addr,
-                                                u16 cqe_pbl_size,
-                                                void OSAL_IOMEM **pp_prod)
+enum _ecore_status_t
+ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
+                           u16 opaque_fid,
+                           struct ecore_queue_start_common_params *p_params,
+                           u16 bd_max_bytes,
+                           dma_addr_t bd_chain_phys_addr,
+                           dma_addr_t cqe_pbl_addr,
+                           u16 cqe_pbl_size,
+                           void OSAL_IOMEM * *pp_prod)
 {
        struct ecore_hw_cid_data *p_rx_cid;
        u32 init_prod_val = 0;
@@ -641,20 +663,20 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
 
        if (IS_VF(p_hwfn->p_dev)) {
                return ecore_vf_pf_rxq_start(p_hwfn,
-                                            rx_queue_id,
-                                            sb,
-                                            sb_index,
+                                            (u8)p_params->queue_id,
+                                            p_params->sb,
+                                            (u8)p_params->sb_idx,
                                             bd_max_bytes,
                                             bd_chain_phys_addr,
                                             cqe_pbl_addr,
                                             cqe_pbl_size, pp_prod);
        }
 
-       rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue);
+       rc = ecore_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_l2_queue);
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+       rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
@@ -667,7 +689,7 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                          (u32 *)(&init_prod_val));
 
        /* Allocate a CID for the queue */
-       p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
+       p_rx_cid = &p_hwfn->p_rx_cids[p_params->queue_id];
        rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                   &p_rx_cid->cid);
        if (rc != ECORE_SUCCESS) {
@@ -675,16 +697,13 @@ enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                return rc;
        }
        p_rx_cid->b_cid_allocated = true;
+       p_params->stats_id = abs_stats_id;
+       p_params->vf_qid = 0;
 
        rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn,
                                           opaque_fid,
                                           p_rx_cid->cid,
-                                          rx_queue_id,
-                                          0,
-                                          vport_id,
-                                          abs_stats_id,
-                                          sb,
-                                          sb_index,
+                                          p_params,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
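
With this change, a PF caller hands all Rx queue identifiers to ecore_sp_eth_rx_queue_start() through one struct ecore_queue_start_common_params. A sketch of such a caller, assuming the driver's ecore headers are available; only the members dereferenced above (queue_id, vport_id, stats_id, sb, sb_idx) are used, and the literal values are illustrative.

    #include "ecore.h"
    #include "ecore_l2_api.h"  /* assumed to declare the prototype used here */

    /* Illustrative PF-side caller; queue/vport/SB ids are placeholders. */
    static enum _ecore_status_t
    example_rxq_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                      dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr,
                      u16 cqe_pbl_size, void OSAL_IOMEM **pp_prod)
    {
            struct ecore_queue_start_common_params params;

            OSAL_MEMSET(&params, 0, sizeof(params));
            params.queue_id = 0;    /* relative Rx queue index */
            params.vport_id = 0;    /* relative vport index */
            params.stats_id = 0;    /* translated to an absolute id internally */
            params.sb = 0;          /* status block id */
            params.sb_idx = 0;      /* Rx protocol index within that SB */

            return ecore_sp_eth_rx_queue_start(p_hwfn, opaque_fid, &params,
                                               0x1000 /* bd_max_bytes */,
                                               bd_chain_phys_addr,
                                               cqe_pbl_addr, cqe_pbl_size,
                                               pp_prod);
    }
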
@@ -811,12 +830,8 @@ ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t
 ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
                              u16 opaque_fid,
-                             u16 tx_queue_id,
                              u32 cid,
-                             u8 vport_id,
-                             u8 stats_id,
-                             u16 sb,
-                             u8 sb_index,
+                             struct ecore_queue_start_common_params *p_params,
                              dma_addr_t pbl_addr,
                              u16 pbl_size,
                              union ecore_qm_pq_params *p_pq_params)
@@ -825,20 +840,20 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_hw_cid_data *p_tx_cid;
-       u16 pq_id, abs_tx_q_id = 0;
-       u8 abs_vport_id;
+       u16 pq_id, abs_tx_qzone_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
+       u8 abs_vport_id;
 
        /* Store information for the stop */
-       p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
        p_tx_cid->cid = cid;
        p_tx_cid->opaque_fid = opaque_fid;
 
-       rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+       rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id);
+       rc = ecore_fw_l2_queue(p_hwfn, p_params->qzone_id, &abs_tx_qzone_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
@@ -857,11 +872,12 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = abs_vport_id;
 
-       p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb);
-       p_ramrod->sb_index = sb_index;
-       p_ramrod->stats_counter_id = stats_id;
+       p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_params->sb);
+       p_ramrod->sb_index = (u8)p_params->sb_idx;
+       p_ramrod->stats_counter_id = p_params->stats_id;
 
-       p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id);
+       p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);
+       p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(abs_tx_qzone_id);
 
        p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@ -872,17 +888,14 @@ ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
-enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
-                                                u16 opaque_fid,
-                                                u16 tx_queue_id,
-                                                u8 vport_id,
-                                                u8 stats_id,
-                                                u16 sb,
-                                                u8 sb_index,
-                                                u8 tc,
-                                                dma_addr_t pbl_addr,
-                                                u16 pbl_size,
-                                                void OSAL_IOMEM **pp_doorbell)
+enum _ecore_status_t
+ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
+                           u16 opaque_fid,
+                           struct ecore_queue_start_common_params *p_params,
+                           u8 tc,
+                           dma_addr_t pbl_addr,
+                           u16 pbl_size,
+                           void OSAL_IOMEM * *pp_doorbell)
 {
        struct ecore_hw_cid_data *p_tx_cid;
        union ecore_qm_pq_params pq_params;
@@ -891,19 +904,19 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 
        if (IS_VF(p_hwfn->p_dev)) {
                return ecore_vf_pf_txq_start(p_hwfn,
-                                            tx_queue_id,
-                                            sb,
-                                            sb_index,
+                                            p_params->queue_id,
+                                            p_params->sb,
+                                            (u8)p_params->sb_idx,
                                             pbl_addr,
                                             pbl_size,
                                             pp_doorbell);
        }
 
-       rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id);
+       rc = ecore_fw_vport(p_hwfn, p_params->stats_id, &abs_stats_id);
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
+       p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
        OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid));
        OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
 
@@ -919,18 +932,16 @@ enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n",
-                   opaque_fid, p_tx_cid->cid, tx_queue_id,
-                   vport_id, sb);
+                   opaque_fid, p_tx_cid->cid, p_params->queue_id,
+                   p_params->vport_id, p_params->sb);
+
+       p_params->stats_id = abs_stats_id;
 
        /* TODO - set tc in the pq_params for multi-cos */
        rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
                                           opaque_fid,
-                                          tx_queue_id,
                                           p_tx_cid->cid,
-                                          vport_id,
-                                          abs_stats_id,
-                                          sb,
-                                          sb_index,
+                                          p_params,
                                           pbl_addr,
                                           pbl_size,
                                           &pq_params);
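
The Tx path takes the same params struct; note that the ramrod hunk above resolves the queue zone from p_params->qzone_id rather than from queue_id. A hedged caller sketch follows; the qzone_id == queue_id mapping is only an assumption for illustration, and the remaining values are placeholders.

    /* Illustrative PF-side Tx queue start; how queue zones map to queues
     * is decided by the real caller, a 1:1 mapping is assumed here. */
    static enum _ecore_status_t
    example_txq_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                      dma_addr_t pbl_addr, u16 pbl_size,
                      void OSAL_IOMEM **pp_doorbell)
    {
            struct ecore_queue_start_common_params params;

            OSAL_MEMSET(&params, 0, sizeof(params));
            params.queue_id = 0;               /* relative Tx queue index */
            params.qzone_id = params.queue_id; /* assumed 1:1 mapping */
            params.vport_id = 0;
            params.stats_id = 0;               /* relative; made absolute inside */
            params.sb = 0;
            params.sb_idx = 0;                 /* Tx protocol index in the SB */

            return ecore_sp_eth_tx_queue_start(p_hwfn, opaque_fid, &params,
                                               0 /* tc */, pbl_addr, pbl_size,
                                               pp_doorbell);
    }
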
@@ -1002,17 +1013,6 @@ ecore_filter_action(enum ecore_filter_opcode opcode)
        return action;
 }
 
-static void ecore_set_fw_mac_addr(__le16 *fw_msb,
-                                 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac)
-{
-       ((u8 *)fw_msb)[0] = mac[1];
-       ((u8 *)fw_msb)[1] = mac[0];
-       ((u8 *)fw_mid)[0] = mac[3];
-       ((u8 *)fw_mid)[1] = mac[2];
-       ((u8 *)fw_lsb)[0] = mac[5];
-       ((u8 *)fw_lsb)[1] = mac[4];
-}
-
 static enum _ecore_status_t
 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
                          u16 opaque_fid,
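
The removed ecore_set_fw_mac_addr() packed a 6-byte MAC into three 16-bit firmware words by swapping each byte pair; its removal here implies the helper now lives in a shared location, though that destination is not part of this diff. A standalone sketch reproducing the same byte ordering:

    /* Illustration of the byte ordering the removed helper produced:
     * msb = mac[1],mac[0]; mid = mac[3],mac[2]; lsb = mac[5],mac[4].
     * Plain uint16_t is used here instead of __le16. */
    #include <stdint.h>
    #include <stdio.h>

    static void set_fw_mac_addr(uint16_t *fw_msb, uint16_t *fw_mid,
                                uint16_t *fw_lsb, const uint8_t *mac)
    {
            ((uint8_t *)fw_msb)[0] = mac[1];
            ((uint8_t *)fw_msb)[1] = mac[0];
            ((uint8_t *)fw_mid)[0] = mac[3];
            ((uint8_t *)fw_mid)[1] = mac[2];
            ((uint8_t *)fw_lsb)[0] = mac[5];
            ((uint8_t *)fw_lsb)[1] = mac[4];
    }

    int main(void)
    {
            const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            uint16_t msb = 0, mid = 0, lsb = 0;

            set_fw_mac_addr(&msb, &mid, &lsb, mac);
            printf("msb=%04x mid=%04x lsb=%04x\n", msb, mid, lsb);
            return 0;
    }
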
@@ -1107,6 +1107,9 @@ ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
        case ECORE_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI;
                break;
+       case ECORE_FILTER_UNUSED: /* @DPDK */
+               p_first_filter->type = MAX_ETH_FILTER_TYPE;
+               break;
        }
 
        if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
@@ -1583,51 +1586,51 @@ static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
                          OFFSETOF(struct public_port, stats),
                          sizeof(port_stats));
 
-       p_stats->rx_64_byte_packets += port_stats.pmm.r64;
-       p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
-       p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
-       p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
-       p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
-       p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
-       p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
-       p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
-       p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
-       p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
-       p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
-       p_stats->rx_crc_errors += port_stats.pmm.rfcs;
-       p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
-       p_stats->rx_pause_frames += port_stats.pmm.rxpf;
-       p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
-       p_stats->rx_align_errors += port_stats.pmm.raln;
-       p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
-       p_stats->rx_oversize_packets += port_stats.pmm.rovr;
-       p_stats->rx_jabbers += port_stats.pmm.rjbr;
-       p_stats->rx_undersize_packets += port_stats.pmm.rund;
-       p_stats->rx_fragments += port_stats.pmm.rfrg;
-       p_stats->tx_64_byte_packets += port_stats.pmm.t64;
-       p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
-       p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
-       p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
-       p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
-       p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
-       p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
-       p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
-       p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
-       p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
-       p_stats->tx_pause_frames += port_stats.pmm.txpf;
-       p_stats->tx_pfc_frames += port_stats.pmm.txpp;
-       p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
-       p_stats->tx_total_collisions += port_stats.pmm.tncl;
-       p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
-       p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
-       p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
-       p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
-       p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
-       p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
-       p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
-       p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
-       p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
-       p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+       p_stats->rx_64_byte_packets += port_stats.eth.r64;
+       p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+       p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+       p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+       p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+       p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+       p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+       p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+       p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+       p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+       p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+       p_stats->rx_crc_errors += port_stats.eth.rfcs;
+       p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+       p_stats->rx_pause_frames += port_stats.eth.rxpf;
+       p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+       p_stats->rx_align_errors += port_stats.eth.raln;
+       p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+       p_stats->rx_oversize_packets += port_stats.eth.rovr;
+       p_stats->rx_jabbers += port_stats.eth.rjbr;
+       p_stats->rx_undersize_packets += port_stats.eth.rund;
+       p_stats->rx_fragments += port_stats.eth.rfrg;
+       p_stats->tx_64_byte_packets += port_stats.eth.t64;
+       p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+       p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+       p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+       p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+       p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+       p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+       p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+       p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+       p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+       p_stats->tx_pause_frames += port_stats.eth.txpf;
+       p_stats->tx_pfc_frames += port_stats.eth.txpp;
+       p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+       p_stats->tx_total_collisions += port_stats.eth.tncl;
+       p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+       p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+       p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+       p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+       p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+       p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+       p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+       p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+       p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+       p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
        for (j = 0; j < 8; j++) {
                p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
                p_stats->brb_discards += port_stats.brb.brb_discard[j];