net/txgbe: support syn filter add and delete
diff --git a/drivers/net/txgbe/txgbe_ethdev.c b/drivers/net/txgbe/txgbe_ethdev.c
index f8dffe1..e8362c0 100644
--- a/drivers/net/txgbe/txgbe_ethdev.c
+++ b/drivers/net/txgbe/txgbe_ethdev.c
@@ -109,6 +109,8 @@ static void txgbe_dev_interrupt_handler(void *param);
 static void txgbe_dev_interrupt_delayed_handler(void *param);
 static void txgbe_configure_msix(struct rte_eth_dev *dev);
 
+static int txgbe_filter_restore(struct rte_eth_dev *dev);
+
 #define TXGBE_SET_HWSTRIP(h, q) do {\
                uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
                uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
@@ -470,6 +472,7 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
        struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
        struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
@@ -677,6 +680,13 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* enable support intr */
        txgbe_enable_intr(eth_dev);
 
+       /* initialize filter info */
+       memset(filter_info, 0,
+              sizeof(struct txgbe_filter_info));
+
+       /* initialize 5tuple filter list */
+       TAILQ_INIT(&filter_info->fivetuple_list);
+
        /* initialize bandwidth configuration info */
        memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
 
@@ -696,6 +706,23 @@ eth_txgbe_dev_uninit(struct rte_eth_dev *eth_dev)
        return 0;
 }
 
+static int txgbe_ntuple_filter_uninit(struct rte_eth_dev *eth_dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(eth_dev);
+       struct txgbe_5tuple_filter *p_5tuple;
+
+       while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list))) {
+               TAILQ_REMOVE(&filter_info->fivetuple_list,
+                            p_5tuple,
+                            entries);
+               rte_free(p_5tuple);
+       }
+       memset(filter_info->fivetuple_mask, 0,
+              sizeof(uint32_t) * TXGBE_5TUPLE_ARRAY_SIZE);
+
+       return 0;
+}
+
 static int
 eth_txgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
                struct rte_pci_device *pci_dev)
@@ -1586,6 +1613,7 @@ skip_link_setup:
 
        /* resume enabled intr since hw reset */
        txgbe_enable_intr(dev);
+       txgbe_filter_restore(dev);
 
        /*
         * Update link status right before return, because it may
@@ -1774,6 +1802,9 @@ txgbe_dev_close(struct rte_eth_dev *dev)
        rte_free(dev->data->hash_mac_addrs);
        dev->data->hash_mac_addrs = NULL;
 
+       /* Remove all ntuple filters of the device */
+       txgbe_ntuple_filter_uninit(dev);
+
        return ret;
 }
 
@@ -3480,6 +3511,426 @@ txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
        return 0;
 }
 
+int
+txgbe_syn_filter_set(struct rte_eth_dev *dev,
+                       struct rte_eth_syn_filter *filter,
+                       bool add)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint32_t syn_info;
+       uint32_t synqf;
+
+       if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       syn_info = filter_info->syn_info;
+
+       if (add) {
+               if (syn_info & TXGBE_SYNCLS_ENA)
+                       return -EINVAL;
+               synqf = (uint32_t)TXGBE_SYNCLS_QPID(filter->queue);
+               synqf |= TXGBE_SYNCLS_ENA;
+
+               if (filter->hig_pri)
+                       synqf |= TXGBE_SYNCLS_HIPRIO;
+               else
+                       synqf &= ~TXGBE_SYNCLS_HIPRIO;
+       } else {
+               synqf = rd32(hw, TXGBE_SYNCLS);
+               if (!(syn_info & TXGBE_SYNCLS_ENA))
+                       return -ENOENT;
+               synqf &= ~(TXGBE_SYNCLS_QPID_MASK | TXGBE_SYNCLS_ENA);
+       }
+
+       filter_info->syn_info = synqf;
+       wr32(hw, TXGBE_SYNCLS, synqf);
+       txgbe_flush(hw);
+       return 0;
+}
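
For reference, a minimal caller sketch (not part of this patch): the legacy struct rte_eth_syn_filter from rte_eth_ctrl.h carries only hig_pri and queue, which is all this setter consumes. The helper name and queue value below are hypothetical.

/* Hypothetical helper, illustration only: steer TCP SYN packets
 * to RX queue 3 with priority over the other filter types.
 */
static int
example_enable_syn_steering(struct rte_eth_dev *dev)
{
	struct rte_eth_syn_filter syn = {
		.hig_pri = 1,	/* match before other filters */
		.queue = 3,	/* illustrative RX queue id */
	};

	return txgbe_syn_filter_set(dev, &syn, true);
}
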
+
+static inline enum txgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+       if (protocol_value == IPPROTO_TCP)
+               return TXGBE_5TF_PROT_TCP;
+       else if (protocol_value == IPPROTO_UDP)
+               return TXGBE_5TF_PROT_UDP;
+       else if (protocol_value == IPPROTO_SCTP)
+               return TXGBE_5TF_PROT_SCTP;
+       else
+               return TXGBE_5TF_PROT_NONE;
+}
+
+/* inject a 5-tuple filter into HW */
+static inline void
+txgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+                          struct txgbe_5tuple_filter *filter)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       int i;
+       uint32_t ftqf, sdpqf;
+       uint32_t l34timir = 0;
+       uint32_t mask = TXGBE_5TFCTL0_MASK;
+
+       i = filter->index;
+       sdpqf = TXGBE_5TFPORT_DST(be_to_le16(filter->filter_info.dst_port));
+       sdpqf |= TXGBE_5TFPORT_SRC(be_to_le16(filter->filter_info.src_port));
+
+       ftqf = TXGBE_5TFCTL0_PROTO(filter->filter_info.proto);
+       ftqf |= TXGBE_5TFCTL0_PRI(filter->filter_info.priority);
+       if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+               mask &= ~TXGBE_5TFCTL0_MSADDR;
+       if (filter->filter_info.dst_ip_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MDADDR;
+       if (filter->filter_info.src_port_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MSPORT;
+       if (filter->filter_info.dst_port_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MDPORT;
+       if (filter->filter_info.proto_mask == 0)
+               mask &= ~TXGBE_5TFCTL0_MPROTO;
+       ftqf |= mask;
+       ftqf |= TXGBE_5TFCTL0_MPOOL;
+       ftqf |= TXGBE_5TFCTL0_ENA;
+
+       wr32(hw, TXGBE_5TFDADDR(i), be_to_le32(filter->filter_info.dst_ip));
+       wr32(hw, TXGBE_5TFSADDR(i), be_to_le32(filter->filter_info.src_ip));
+       wr32(hw, TXGBE_5TFPORT(i), sdpqf);
+       wr32(hw, TXGBE_5TFCTL0(i), ftqf);
+
+       l34timir |= TXGBE_5TFCTL1_QP(filter->queue);
+       wr32(hw, TXGBE_5TFCTL1(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added; a free hardware
+ *         index is allocated for it here.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+static int
+txgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+                       struct txgbe_5tuple_filter *filter)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i, idx, shift;
+
+       /*
+        * look for an unused 5tuple filter index,
+        * and insert the filter to list.
+        */
+       for (i = 0; i < TXGBE_MAX_FTQF_FILTERS; i++) {
+               idx = i / (sizeof(uint32_t) * NBBY);
+               shift = i % (sizeof(uint32_t) * NBBY);
+               if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+                       filter_info->fivetuple_mask[idx] |= 1 << shift;
+                       filter->index = i;
+                       TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+                                         filter,
+                                         entries);
+                       break;
+               }
+       }
+       if (i >= TXGBE_MAX_FTQF_FILTERS) {
+               PMD_DRV_LOG(ERR, "5tuple filters are full.");
+               return -ENOSYS;
+       }
+
+       txgbe_inject_5tuple_filter(dev, filter);
+
+       return 0;
+}
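
The allocation above treats fivetuple_mask as a flat bitmap with one bit per hardware slot (TXGBE_MAX_FTQF_FILTERS bits spread across uint32_t words): idx selects the word and shift the bit. A stand-alone sketch of the same idiom, with hypothetical names, for readers unfamiliar with it:

/* Sketch only; names are illustrative, not part of the driver. */
#define EXAMPLE_MAX_SLOTS 128
#define EXAMPLE_NBBY 8	/* bits per byte, as NBBY in the driver */

static int
example_alloc_slot(uint32_t *bitmap)	/* EXAMPLE_MAX_SLOTS-bit bitmap */
{
	int i;

	for (i = 0; i < EXAMPLE_MAX_SLOTS; i++) {
		int idx = i / (sizeof(uint32_t) * EXAMPLE_NBBY);	/* word */
		int shift = i % (sizeof(uint32_t) * EXAMPLE_NBBY);	/* bit */

		if (!(bitmap[idx] & (1u << shift))) {
			bitmap[idx] |= 1u << shift;
			return i;	/* first free hardware slot */
		}
	}
	return -1;	/* all slots in use */
}
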
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+txgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+                       struct txgbe_5tuple_filter *filter)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint16_t index = filter->index;
+
+       filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+                               ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+       TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+       rte_free(filter);
+
+       wr32(hw, TXGBE_5TFDADDR(index), 0);
+       wr32(hw, TXGBE_5TFSADDR(index), 0);
+       wr32(hw, TXGBE_5TFPORT(index), 0);
+       wr32(hw, TXGBE_5TFCTL0(index), 0);
+       wr32(hw, TXGBE_5TFCTL1(index), 0);
+}
+
+static inline struct txgbe_5tuple_filter *
+txgbe_5tuple_filter_lookup(struct txgbe_5tuple_filter_list *filter_list,
+                       struct txgbe_5tuple_filter_info *key)
+{
+       struct txgbe_5tuple_filter *it;
+
+       TAILQ_FOREACH(it, filter_list, entries) {
+               if (memcmp(key, &it->filter_info,
+                       sizeof(struct txgbe_5tuple_filter_info)) == 0) {
+                       return it;
+               }
+       }
+       return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter
+ * to struct txgbe_5tuple_filter_info
+ */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+                       struct txgbe_5tuple_filter_info *filter_info)
+{
+       if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM ||
+               filter->priority > TXGBE_5TUPLE_MAX_PRI ||
+               filter->priority < TXGBE_5TUPLE_MIN_PRI)
+               return -EINVAL;
+
+       switch (filter->dst_ip_mask) {
+       case UINT32_MAX:
+               filter_info->dst_ip_mask = 0;
+               filter_info->dst_ip = filter->dst_ip;
+               break;
+       case 0:
+               filter_info->dst_ip_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->src_ip_mask) {
+       case UINT32_MAX:
+               filter_info->src_ip_mask = 0;
+               filter_info->src_ip = filter->src_ip;
+               break;
+       case 0:
+               filter_info->src_ip_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->dst_port_mask) {
+       case UINT16_MAX:
+               filter_info->dst_port_mask = 0;
+               filter_info->dst_port = filter->dst_port;
+               break;
+       case 0:
+               filter_info->dst_port_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->src_port_mask) {
+       case UINT16_MAX:
+               filter_info->src_port_mask = 0;
+               filter_info->src_port = filter->src_port;
+               break;
+       case 0:
+               filter_info->src_port_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid src_port mask.");
+               return -EINVAL;
+       }
+
+       switch (filter->proto_mask) {
+       case UINT8_MAX:
+               filter_info->proto_mask = 0;
+               filter_info->proto =
+                       convert_protocol_type(filter->proto);
+               break;
+       case 0:
+               filter_info->proto_mask = 1;
+               break;
+       default:
+               PMD_DRV_LOG(ERR, "invalid protocol mask.");
+               return -EINVAL;
+       }
+
+       filter_info->priority = (uint8_t)filter->priority;
+       return 0;
+}
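
Note the inverted mask convention: in rte_eth_ntuple_filter a full mask (UINT32_MAX, UINT16_MAX, UINT8_MAX) means "compare this field", while txgbe_5tuple_filter_info stores 0 for "compare" and 1 for "wildcard", matching the register mask handling in txgbe_inject_5tuple_filter. A hedged caller sketch (values illustrative; the struct and RTE_5TUPLE_FLAGS come from rte_eth_ctrl.h, and addresses/ports are expected in big endian):

/* Sketch: match TCP packets to destination port 80 from any address
 * and steer them to RX queue 1; pass to txgbe_add_del_ntuple_filter().
 */
struct rte_eth_ntuple_filter ntuple = {
	.flags = RTE_5TUPLE_FLAGS,
	.proto = IPPROTO_TCP,
	.proto_mask = UINT8_MAX,		/* compare protocol */
	.dst_port = rte_cpu_to_be_16(80),
	.dst_port_mask = UINT16_MAX,		/* compare dst port */
	.src_port_mask = 0,			/* wildcard src port */
	.dst_ip_mask = 0,			/* wildcard dst IP */
	.src_ip_mask = 0,			/* wildcard src IP */
	.priority = 1,				/* illustrative, within the 5-tuple range */
	.queue = 1,
};
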
+
+/*
+ * add or delete an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter.
+ * add: if true, add the filter; if false, remove it.
+ *
+ * @return
+ *    - On success, zero.
+ *    - On failure, a negative value.
+ */
+int
+txgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ntuple_filter *ntuple_filter,
+                       bool add)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter_info filter_5tuple;
+       struct txgbe_5tuple_filter *filter;
+       int ret;
+
+       if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+               PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+               return -EINVAL;
+       }
+
+       memset(&filter_5tuple, 0, sizeof(struct txgbe_5tuple_filter_info));
+       ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+       if (ret < 0)
+               return ret;
+
+       filter = txgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+                                        &filter_5tuple);
+       if (filter != NULL && add) {
+               PMD_DRV_LOG(ERR, "filter exists.");
+               return -EEXIST;
+       }
+       if (filter == NULL && !add) {
+               PMD_DRV_LOG(ERR, "filter doesn't exist.");
+               return -ENOENT;
+       }
+
+       if (add) {
+               filter = rte_zmalloc("txgbe_5tuple_filter",
+                               sizeof(struct txgbe_5tuple_filter), 0);
+               if (filter == NULL)
+                       return -ENOMEM;
+               rte_memcpy(&filter->filter_info,
+                                &filter_5tuple,
+                                sizeof(struct txgbe_5tuple_filter_info));
+               filter->queue = ntuple_filter->queue;
+               ret = txgbe_add_5tuple_filter(dev, filter);
+               if (ret < 0) {
+                       rte_free(filter);
+                       return ret;
+               }
+       } else {
+               txgbe_remove_5tuple_filter(dev, filter);
+       }
+
+       return 0;
+}
+
+int
+txgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+                       struct rte_eth_ethertype_filter *filter,
+                       bool add)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint32_t etqf = 0;
+       uint32_t etqs = 0;
+       int ret;
+       struct txgbe_ethertype_filter ethertype_filter;
+
+       if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM)
+               return -EINVAL;
+
+       if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+           filter->ether_type == RTE_ETHER_TYPE_IPV6) {
+               PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+                       " ethertype filter.", filter->ether_type);
+               return -EINVAL;
+       }
+
+       if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+               PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+               return -EINVAL;
+       }
+       if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+               PMD_DRV_LOG(ERR, "drop option is unsupported.");
+               return -EINVAL;
+       }
+
+       ret = txgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+       if (ret >= 0 && add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+                           filter->ether_type);
+               return -EEXIST;
+       }
+       if (ret < 0 && !add) {
+               PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+                           filter->ether_type);
+               return -ENOENT;
+       }
+
+       if (add) {
+               etqf = TXGBE_ETFLT_ENA;
+               etqf |= TXGBE_ETFLT_ETID(filter->ether_type);
+               etqs |= TXGBE_ETCLS_QPID(filter->queue);
+               etqs |= TXGBE_ETCLS_QENA;
+
+               ethertype_filter.ethertype = filter->ether_type;
+               ethertype_filter.etqf = etqf;
+               ethertype_filter.etqs = etqs;
+               ethertype_filter.conf = FALSE;
+               ret = txgbe_ethertype_filter_insert(filter_info,
+                                                   &ethertype_filter);
+               if (ret < 0) {
+                       PMD_DRV_LOG(ERR, "ethertype filters are full.");
+                       return -ENOSPC;
+               }
+       } else {
+               ret = txgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+               if (ret < 0)
+                       return -ENOSYS;
+       }
+       wr32(hw, TXGBE_ETFLT(ret), etqf);
+       wr32(hw, TXGBE_ETCLS(ret), etqs);
+       txgbe_flush(hw);
+
+       return 0;
+}
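
The ethertype path rejects IPv4/IPv6 (those are handled by the ntuple/flow paths) as well as the MAC-compare and drop flags. A minimal caller sketch, with illustrative values, using struct rte_eth_ethertype_filter from rte_eth_ctrl.h:

/* Sketch: steer LLDP frames (EtherType 0x88CC) to RX queue 0. */
struct rte_eth_ethertype_filter etf = {
	.ether_type = 0x88CC,	/* illustrative EtherType */
	.flags = 0,		/* no MAC compare, no drop */
	.queue = 0,
};
int rc = txgbe_add_del_ethertype_filter(dev, &etf, true);
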
+
+static int
+txgbe_dev_filter_ctrl(__rte_unused struct rte_eth_dev *dev,
+                    enum rte_filter_type filter_type,
+                    enum rte_filter_op filter_op,
+                    void *arg)
+{
+       int ret = 0;
+
+       switch (filter_type) {
+       case RTE_ETH_FILTER_GENERIC:
+               if (filter_op != RTE_ETH_FILTER_GET)
+                       return -EINVAL;
+               *(const void **)arg = &txgbe_flow_ops;
+               break;
+       default:
+               PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+                                                       filter_type);
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
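
filter_ctrl here only exposes the generic flow ops: applications are not expected to call the driver entry points above directly, but to go through rte_flow, which fetches txgbe_flow_ops via the RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET path. An application-side sketch (pattern and actions are illustrative; which items the PMD accepts depends on the flow parser in txgbe_flow.c, not shown in this hunk):

/* Sketch: create an ingress flow steering TCP/IPv4 traffic to queue 1. */
struct rte_flow_attr attr = { .ingress = 1 };
struct rte_flow_item pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_TCP },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
struct rte_flow_action_queue queue = { .index = 1 };
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
struct rte_flow_error err;
struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
					actions, &err);
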
+
 static u8 *
 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
                        u8 **mc_addr_ptr, u32 *vmdq)
@@ -4001,6 +4452,63 @@ txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
        return 0;
 }
 
+/* restore n-tuple filter */
+static inline void
+txgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       struct txgbe_5tuple_filter *node;
+
+       TAILQ_FOREACH(node, &filter_info->fivetuple_list, entries) {
+               txgbe_inject_5tuple_filter(dev, node);
+       }
+}
+
+/* restore ethernet type filter */
+static inline void
+txgbe_ethertype_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       int i;
+
+       for (i = 0; i < TXGBE_ETF_ID_MAX; i++) {
+               if (filter_info->ethertype_mask & (1 << i)) {
+                       wr32(hw, TXGBE_ETFLT(i),
+                                       filter_info->ethertype_filters[i].etqf);
+                       wr32(hw, TXGBE_ETCLS(i),
+                                       filter_info->ethertype_filters[i].etqs);
+                       txgbe_flush(hw);
+               }
+       }
+}
+
+/* restore SYN filter */
+static inline void
+txgbe_syn_filter_restore(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
+       uint32_t synqf;
+
+       synqf = filter_info->syn_info;
+
+       if (synqf & TXGBE_SYNCLS_ENA) {
+               wr32(hw, TXGBE_SYNCLS, synqf);
+               txgbe_flush(hw);
+       }
+}
+
+static int
+txgbe_filter_restore(struct rte_eth_dev *dev)
+{
+       txgbe_ntuple_filter_restore(dev);
+       txgbe_ethertype_filter_restore(dev);
+       txgbe_syn_filter_restore(dev);
+
+       return 0;
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_configure              = txgbe_dev_configure,
        .dev_infos_get              = txgbe_dev_info_get,
@@ -4055,6 +4563,7 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .reta_query                 = txgbe_dev_rss_reta_query,
        .rss_hash_update            = txgbe_dev_rss_hash_update,
        .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
+       .filter_ctrl                = txgbe_dev_filter_ctrl,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
        .rxq_info_get               = txgbe_rxq_info_get,
        .txq_info_get               = txgbe_txq_info_get,