X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_fdir.c;h=dd940cecb15cb5f923d2383257df2d8de15dc7f0;hb=6d13ea8e8e49ab957deae2bba5ecf4a4bfe747d1;hp=3a9e65651f2c48a548fbda29751608f96e52bfe4;hpb=2c6b19af78e3ae222aff022967142e1e27bcd05c;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index 3a9e65651f..dd940cecb1 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -11,7 +11,7 @@
 #include 
 #include 
-#include 
+#include 
 #include 
 #include 
 #include 
@@ -139,7 +139,6 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
 
 	rte_wmb();
 	/* Init the RX tail regieter. */
-	I40E_PCI_REG_WRITE(rxq->qrx_tail, 0);
 	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
 
 	return err;
@@ -526,7 +525,7 @@ i40e_set_flx_pld_cfg(struct i40e_pf *pf,
 		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
 			  (num << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
 			  (layer_idx * I40E_MAX_FLXPLD_FIED);
-		I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
 	}
 
 	for (i = 0; i < num; i++) {
@@ -649,22 +648,31 @@ i40e_fdir_configure(struct rte_eth_dev *dev)
 		PMD_DRV_LOG(ERR, " invalid configuration arguments.");
 		return -EINVAL;
 	}
-	/* configure flex payload */
-	for (i = 0; i < conf->nb_payloads; i++)
-		i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
-	/* configure flex mask*/
-	for (i = 0; i < conf->nb_flexmasks; i++) {
-		if (hw->mac.type == I40E_MAC_X722) {
-			/* get translated pctype value in fd pctype register */
-			pctype = (enum i40e_filter_pctype)i40e_read_rx_ctl(
-				hw, I40E_GLQF_FD_PCTYPES(
-				(int)i40e_flowtype_to_pctype(pf->adapter,
-				conf->flex_mask[i].flow_type)));
-		} else
-			pctype = i40e_flowtype_to_pctype(pf->adapter,
-						conf->flex_mask[i].flow_type);
-		i40e_set_flex_mask_on_pctype(pf, pctype, &conf->flex_mask[i]);
+	if (!pf->support_multi_driver) {
+		/* configure flex payload */
+		for (i = 0; i < conf->nb_payloads; i++)
+			i40e_set_flx_pld_cfg(pf, &conf->flex_set[i]);
+		/* configure flex mask*/
+		for (i = 0; i < conf->nb_flexmasks; i++) {
+			if (hw->mac.type == I40E_MAC_X722) {
+				/* get pctype value in fd pctype register */
+				pctype = (enum i40e_filter_pctype)
+					  i40e_read_rx_ctl(hw,
+						I40E_GLQF_FD_PCTYPES(
+						(int)i40e_flowtype_to_pctype(
+						pf->adapter,
+						conf->flex_mask[i].flow_type)));
+			} else {
+				pctype = i40e_flowtype_to_pctype(pf->adapter,
+						conf->flex_mask[i].flow_type);
+			}
+
+			i40e_set_flex_mask_on_pctype(pf, pctype,
+						     &conf->flex_mask[i]);
+		}
+	} else {
+		PMD_DRV_LOG(ERR, "Not support flexible payload.");
 	}
 
 	return ret;
@@ -677,7 +685,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
 {
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
-	uint8_t len = 2 * sizeof(struct ether_addr);
+	uint8_t len = 2 * sizeof(struct rte_ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
 	static const uint8_t next_proto[] = {
@@ -693,7 +701,7 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
 		[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] = IPPROTO_NONE,
 	};
 
-	raw_pkt += 2 * sizeof(struct ether_addr);
+	raw_pkt += 2 * sizeof(struct rte_ether_addr);
 	if (vlan && fdir_input->flow_ext.vlan_tci) {
 		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
 		rte_memcpy(raw_pkt + sizeof(uint16_t),
@@ -903,7 +911,7 @@ i40e_fdir_construct_pkt(struct i40e_pf *pf,
 		 */
 		if (fdir_input->flow.l2_flow.ether_type ==
 			rte_cpu_to_be_16(ETHER_TYPE_ARP))
-			payload += sizeof(struct arp_hdr);
+			payload += sizeof(struct rte_arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
 		break;
 	default:
@@ -951,7 +959,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
 	struct i40e_customized_pctype *cus_pctype = NULL;
 	static uint8_t vlan_frame[] = {0x81, 0, 0, 0};
 	uint16_t *ether_type;
-	uint8_t len = 2 * sizeof(struct ether_addr);
+	uint8_t len = 2 * sizeof(struct rte_ether_addr);
 	struct ipv4_hdr *ip;
 	struct ipv6_hdr *ip6;
 	uint8_t pctype = fdir_input->pctype;
@@ -969,7 +977,7 @@ i40e_flow_fdir_fill_eth_ip_head(struct i40e_pf *pf,
 		[I40E_FILTER_PCTYPE_NONF_IPV6_OTHER] = IPPROTO_NONE,
 	};
 
-	raw_pkt += 2 * sizeof(struct ether_addr);
+	raw_pkt += 2 * sizeof(struct rte_ether_addr);
 	if (vlan && fdir_input->flow_ext.vlan_tci) {
 		rte_memcpy(raw_pkt, vlan_frame, sizeof(vlan_frame));
 		rte_memcpy(raw_pkt + sizeof(uint16_t),
@@ -1189,7 +1197,7 @@ i40e_flow_fdir_construct_pkt(struct i40e_pf *pf,
 		 */
 		if (fdir_input->flow.l2_flow.ether_type ==
			rte_cpu_to_be_16(ETHER_TYPE_ARP))
-			payload += sizeof(struct arp_hdr);
+			payload += sizeof(struct rte_arp_hdr);
 		set_idx = I40E_FLXPLD_L2_IDX;
 	} else if (fdir_input->flow_ext.customized_pctype) {
 		/* If customized pctype is used */
@@ -1342,13 +1350,18 @@ i40e_check_fdir_programming_status(struct i40e_rx_queue *rxq)
 			PMD_DRV_LOG(ERR, "invalid programming status"
 				    " reported, error = %u.", error);
 		} else
-			PMD_DRV_LOG(ERR, "unknown programming status"
+			PMD_DRV_LOG(INFO, "unknown programming status"
 				    " reported, len = %d, id = %u.", len, id);
 		rxdp->wb.qword1.status_error_len = 0;
 		rxq->rx_tail++;
 		if (unlikely(rxq->rx_tail == rxq->nb_rx_desc))
 			rxq->rx_tail = 0;
+		if (rxq->rx_tail == 0)
+			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+		else
+			I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->rx_tail - 1);
 	}
+
 	return ret;
 }
@@ -1591,8 +1604,15 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
 	if (add) {
 		fdir_filter = rte_zmalloc("fdir_filter",
 					  sizeof(*fdir_filter), 0);
+		if (fdir_filter == NULL) {
+			PMD_DRV_LOG(ERR, "Failed to alloc memory.");
+			return -ENOMEM;
+		}
+
 		rte_memcpy(fdir_filter, &check_filter, sizeof(check_filter));
 		ret = i40e_sw_fdir_filter_insert(pf, fdir_filter);
+		if (ret < 0)
+			rte_free(fdir_filter);
 	} else {
 		ret = i40e_sw_fdir_filter_del(pf, &node->fdir.input);
 	}