X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_rxtx.c;h=950b7894e0979d07d928a5ba90cabee6cc022b3b;hb=731fa4003b1162da230a805abfc4f3409a9653d0;hp=d1d3baff903834910d0030732314c0ca9415958f;hpb=a8d0d473a0a89b3c50813e3e144e9a5377429f24;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index d1d3baff90..950b7894e0 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx.c
@@ -33,7 +33,8 @@
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
+#include <rte_security_driver.h>
 #include <rte_prefetch.h>
 #include <rte_udp.h>
 #include <rte_tcp.h>
@@ -694,7 +695,7 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		if (use_ipsec) {
 			union ixgbe_crypto_tx_desc_md *ipsec_mdata =
 				(union ixgbe_crypto_tx_desc_md *)
-					&tx_pkt->udata64;
+					rte_security_dynfield(tx_pkt);
 			tx_offload.sa_idx = ipsec_mdata->sa_idx;
 			tx_offload.sec_pad_len = ipsec_mdata->pad_len;
 		}
@@ -859,7 +860,8 @@ ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			}
 
 			ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
-				tx_offload, &tx_pkt->udata64);
+				tx_offload,
+				rte_security_dynfield(tx_pkt));
 
 			txe->last_id = tx_last;
 			tx_id = txe->next_id;
@@ -1367,6 +1369,31 @@ const uint32_t
 		RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP,
 };
 
+int
+ixgbe_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+	volatile union ixgbe_adv_rx_desc *rxdp;
+	struct ixgbe_rx_queue *rxq = rx_queue;
+	uint16_t desc;
+
+	desc = rxq->rx_tail;
+	rxdp = &rxq->rx_ring[desc];
+	/* watch for changes in status bit */
+	pmc->addr = &rxdp->wb.upper.status_error;
+
+	/*
+	 * we expect the DD bit to be set to 1 if this descriptor was already
+	 * written to.
+	 */
+	pmc->val = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD);
+	pmc->mask = rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD);
+
+	/* the registers are 32-bit */
+	pmc->size = sizeof(uint32_t);
+
+	return 0;
+}
+
 /* @note: fix ixgbe_dev_supported_ptypes_get() if any change here. */
 static inline uint32_t
 ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask)
@@ -1439,7 +1466,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
 }
 
 static inline uint64_t
-rx_desc_error_to_pkt_flags(uint32_t rx_status)
+rx_desc_error_to_pkt_flags(uint32_t rx_status, uint16_t pkt_info,
+			   uint8_t rx_udp_csum_zero_err)
 {
 	uint64_t pkt_flags;
 
@@ -1456,6 +1484,15 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 	pkt_flags = error_to_pkt_flags_map[(rx_status >>
 		IXGBE_RXDADV_ERR_CKSUM_BIT) & IXGBE_RXDADV_ERR_CKSUM_MSK];
 
+	/* Mask out the bad UDP checksum error if the hardware has UDP zero
+	 * checksum error issue, so that the software application will then
+	 * have to recompute the checksum itself if needed.
+	 */
+	if ((rx_status & IXGBE_RXDADV_ERR_TCPE) &&
+	    (pkt_info & IXGBE_RXDADV_PKTTYPE_UDP) &&
+	    rx_udp_csum_zero_err)
+		pkt_flags &= ~PKT_RX_L4_CKSUM_BAD;
+
 	if ((rx_status & IXGBE_RXD_STAT_OUTERIPCS) &&
 	    (rx_status & IXGBE_RXDADV_ERR_OUTERIPER)) {
 		pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
@@ -1542,7 +1579,9 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq)
 			/* convert descriptor fields to rte mbuf flags */
 			pkt_flags = rx_desc_status_to_pkt_flags(s[j],
 				vlan_flags);
-			pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
+			pkt_flags |= rx_desc_error_to_pkt_flags(s[j],
+					(uint16_t)pkt_info[j],
+					rxq->rx_udp_csum_zero_err);
 			pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags
 					((uint16_t)pkt_info[j]);
 			mb->ol_flags = pkt_flags;
@@ -1875,7 +1914,9 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
 
 		pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags);
-		pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
+		pkt_flags = pkt_flags |
+			rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
+				rxq->rx_udp_csum_zero_err);
 		pkt_flags = pkt_flags |
 			ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
 		rxm->ol_flags = pkt_flags;
@@ -1968,7 +2009,8 @@ ixgbe_fill_cluster_head_buf(
 	head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan);
 	pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data);
 	pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
-	pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
+	pkt_flags |= rx_desc_error_to_pkt_flags(staterr, (uint16_t)pkt_info,
+		rxq->rx_udp_csum_zero_err);
 	pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info);
 	head->ol_flags = pkt_flags;
 	head->packet_type =
@@ -3089,6 +3131,13 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	else
 		rxq->pkt_type_mask = IXGBE_PACKET_TYPE_MASK_82599;
 
+	/*
+	 * 82599 errata, UDP frames with a 0 checksum can be marked as checksum
+	 * errors.
+	 */
+	if (hw->mac.type == ixgbe_mac_82599EB)
+		rxq->rx_udp_csum_zero_err = 1;
+
 	/*
 	 * Allocate RX ring hardware descriptors. A memzone large enough to
 	 * handle the maximum ring size is allocated in order to allow for
@@ -4896,15 +4945,11 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 	/* RFCTL configuration */
 	rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL);
 	if ((rsc_capable) && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
-		/*
-		 * Since NFS packets coalescing is not supported - clear
-		 * RFCTL.NFSW_DIS and RFCTL.NFSR_DIS when RSC is
-		 * enabled.
-		 */
-		rfctl &= ~(IXGBE_RFCTL_RSC_DIS | IXGBE_RFCTL_NFSW_DIS |
-			   IXGBE_RFCTL_NFSR_DIS);
+		rfctl &= ~IXGBE_RFCTL_RSC_DIS;
 	else
 		rfctl |= IXGBE_RFCTL_RSC_DIS;
+	/* disable NFS filtering */
+	rfctl |= IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS;
 	IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl);
 
 	/* If LRO hasn't been requested - we are done here. */
@@ -5632,8 +5677,12 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 	 * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
 	 * VF packets received can work in all cases.
 	 */
-	ixgbevf_rlpml_set_vf(hw,
-		(uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len);
+	if (ixgbevf_rlpml_set_vf(hw,
+	    (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+		PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
+			     dev->data->dev_conf.rxmode.max_rx_pkt_len);
+		return -EINVAL;
+	}
 
 	/*
 	 * Assume no header split and no VLAN strip support
@@ -5920,7 +5969,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
 	return 0;
 }
 
-/* Stubs needed for linkage when CONFIG_RTE_ARCH_PPC_64 is set */
+/* Stubs needed for linkage when RTE_ARCH_PPC_64 is set */
 #if defined(RTE_ARCH_PPC_64)
 int
 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
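
Note: the udata64 hunks above move the IPsec metadata from the old generic
mbuf udata64 field to the rte_security dynamic mbuf field.
rte_security_dynfield() only resolves to a valid pointer after the field has
been registered once on the driver side. A minimal sketch of that
registration, assuming the 20.11-era internal API from rte_security_driver.h;
the function name is illustrative, not the actual ixgbe code:

#include <rte_errno.h>
#include <rte_security_driver.h>

/* One-time, driver-side registration of the security metadata dynfield.
 * On success rte_security_dynfield_offset is set and
 * rte_security_dynfield(mbuf) becomes usable in the data path.
 */
static int
example_register_security_dynfield(void)
{
	if (rte_security_dynfield_register() < 0)
		return -rte_errno; /* no space left in the mbuf for the field */
	return 0;
}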
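
Note: ixgbe_get_monitor_addr() is the driver half of the ethdev power
management path: it tells the caller which descriptor word to watch and which
DD bit value means a new packet has been written back. On the application
side the flow is rte_eth_get_monitor_addr() followed by rte_power_monitor().
A minimal sketch, assuming the 21.02-era experimental API; wait_for_rx() and
the ~1 ms TSC deadline are illustrative:

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_power_intrinsics.h>

/* Sleep on an idle RX queue until the NIC writes back a descriptor or the
 * TSC deadline expires, whichever comes first.
 */
static void
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_power_monitor_cond pmc;

	/* ask the PMD (ixgbe_get_monitor_addr() for ixgbe) what to watch */
	if (rte_eth_get_monitor_addr(port_id, queue_id, &pmc) != 0)
		return; /* no monitor support, caller keeps busy-polling */

	/* UMONITOR/UMWAIT (or the platform equivalent) on *pmc.addr */
	rte_power_monitor(&pmc, rte_get_tsc_cycles() + rte_get_tsc_hz() / 1000);
}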
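
Note: with the 82599 erratum handled in rx_desc_error_to_pkt_flags(), an
affected UDP packet is delivered with PKT_RX_L4_CKSUM_BAD cleared, so its L4
checksum state is reported as unknown and the application has to verify it in
software if it cares. A minimal sketch of that fallback, assuming a
non-fragmented IPv4/UDP packet with no IP options and all headers in the
first segment; udp_csum_ok() is illustrative:

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_udp.h>

/* Return nonzero if the UDP checksum is acceptable, recomputing it in
 * software when the PMD reported it as unknown.
 */
static int
udp_csum_ok(struct rte_mbuf *m)
{
	uint64_t l4 = m->ol_flags & PKT_RX_L4_CKSUM_MASK;
	struct rte_ipv4_hdr *ip;
	struct rte_udp_hdr *udp;
	uint16_t wire, computed;

	if (l4 == PKT_RX_L4_CKSUM_GOOD)
		return 1;
	if (l4 == PKT_RX_L4_CKSUM_BAD)
		return 0;

	ip = rte_pktmbuf_mtod_offset(m, struct rte_ipv4_hdr *,
				     sizeof(struct rte_ether_hdr));
	udp = (struct rte_udp_hdr *)((char *)ip + sizeof(*ip));

	wire = udp->dgram_cksum;
	if (wire == 0)	/* sender did not use a UDP checksum */
		return 1;

	/* zero the field, recompute over pseudo-header + datagram, restore */
	udp->dgram_cksum = 0;
	computed = rte_ipv4_udptcp_cksum(ip, udp);
	udp->dgram_cksum = wire;

	return wire == computed;
}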