event/octeontx2: add Rx adapter
author    Pavan Nikhilesh <pbhagavatula@marvell.com>
          Thu, 4 Jul 2019 02:19:35 +0000 (07:49 +0530)
committer Jerin Jacob <jerinj@marvell.com>
          Sun, 7 Jul 2019 13:06:30 +0000 (15:06 +0200)
Add event eth Rx adapter capabilities get, queue add and queue delete functions.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
doc/guides/eventdevs/octeontx2.rst
drivers/event/octeontx2/Makefile
drivers/event/octeontx2/meson.build
drivers/event/octeontx2/otx2_evdev.c
drivers/event/octeontx2/otx2_evdev.h
drivers/event/octeontx2/otx2_evdev_adptr.c

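For context (not part of this patch), below is a minimal application-side sketch of how the new capability is consumed through the generic rte_event_eth_rx_adapter API; the event device, ethdev port and adapter ids are hypothetical placeholders.

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Hypothetical ids, for illustration only. */
#define EVDEV_ID   0
#define ETH_PORT   0
#define ADAPTER_ID 0

static int
setup_rx_adapter(struct rte_event_port_conf *port_conf)
{
	uint32_t caps;
	int rc;

	/* With this patch the PMD reports CAP_INTERNAL_PORT for
	 * net_octeontx2 ethdev ports, i.e. packets are injected into the
	 * SSO by hardware and no Rx adapter service core is required.
	 */
	rc = rte_event_eth_rx_adapter_caps_get(EVDEV_ID, ETH_PORT, &caps);
	if (rc)
		return rc;

	rc = rte_event_eth_rx_adapter_create(ADAPTER_ID, EVDEV_ID, port_conf);
	if (rc)
		return rc;

	if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
		/* SW-cap only path: a service core would have to run the
		 * adapter; not needed for net_octeontx2 ports.
		 */
	}

	return 0;
}
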
diff --git a/doc/guides/eventdevs/octeontx2.rst b/doc/guides/eventdevs/octeontx2.rst
index e5624ba..fad84cf 100644
@@ -32,6 +32,12 @@ Features of the OCTEON TX2 SSO PMD are:
   time granularity of 2.5us.
 - Up to 256 TIM rings aka event timer adapters.
 - Up to 8 rings traversed in parallel.
+- HW managed packets enqueued from ethdev to eventdev exposed through event eth
+  RX adapter.
+- N:1 ethernet device Rx queue to Event queue mapping.
+- Lockfree Tx from event eth Tx adapter using ``DEV_TX_OFFLOAD_MT_LOCKFREE``
+  capability while maintaining receive packet order.
+- Full Rx/Tx offload support defined through ethdev queue config.
 
 Prerequisites and Compilation procedure
 ---------------------------------------
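
The N:1 Rx queue to event queue mapping noted in the feature list above is driven from the application via rte_event_eth_rx_adapter_queue_add(); a hedged sketch follows (the event queue id and scheduling type are illustrative assumptions, not part of this patch). Passing rx_queue_id as -1 maps every Rx queue of the port to the same event queue.

#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>

/* Map all Rx queues of eth_port to event queue 0 (atomic scheduling). */
static int
add_all_rx_queues(uint8_t adapter_id, uint16_t eth_port)
{
	struct rte_event_eth_rx_adapter_queue_conf qconf = {
		.ev = {
			.queue_id = 0,
			.sched_type = RTE_SCHED_TYPE_ATOMIC,
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		},
	};

	/* rx_queue_id == -1 adds every Rx queue of the port in one call. */
	return rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port, -1,
						  &qconf);
}
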
diff --git a/drivers/event/octeontx2/Makefile b/drivers/event/octeontx2/Makefile
index aac238b..8289f61 100644
@@ -43,7 +43,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_selftest.c
 SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_irq.c
 
 LDLIBS += -lrte_eal -lrte_bus_pci -lrte_pci -lrte_kvargs
-LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf
+LDLIBS += -lrte_mempool -lrte_eventdev -lrte_mbuf -lrte_ethdev
 LDLIBS += -lrte_common_octeontx2 -lrte_mempool_octeontx2
 
 include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/event/octeontx2/meson.build b/drivers/event/octeontx2/meson.build
index ecd8fd8..807818b 100644
@@ -31,4 +31,4 @@ foreach flag: extra_flags
        endif
 endforeach
 
-deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2']
+deps += ['bus_pci', 'common_octeontx2', 'mempool_octeontx2', 'pmd_octeontx2']
diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 914869b..2956a57 100644
@@ -1123,6 +1123,10 @@ static struct rte_eventdev_ops otx2_sso_ops = {
        .port_unlink      = otx2_sso_port_unlink,
        .timeout_ticks    = otx2_sso_timeout_ticks,
 
+       .eth_rx_adapter_caps_get  = otx2_sso_rx_adapter_caps_get,
+       .eth_rx_adapter_queue_add = otx2_sso_rx_adapter_queue_add,
+       .eth_rx_adapter_queue_del = otx2_sso_rx_adapter_queue_del,
+
        .timer_adapter_caps_get = otx2_tim_caps_get,
 
        .xstats_get       = otx2_sso_xstats_get,
diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index ba3aae5..8bee8c7 100644
@@ -6,9 +6,12 @@
 #define __OTX2_EVDEV_H__
 
 #include <rte_eventdev.h>
+#include <rte_eventdev_pmd.h>
+#include <rte_event_eth_rx_adapter.h>
 
 #include "otx2_common.h"
 #include "otx2_dev.h"
+#include "otx2_ethdev.h"
 #include "otx2_mempool.h"
 
 #define EVENTDEV_NAME_OCTEONTX2_PMD otx2_eventdev
@@ -249,6 +252,18 @@ void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
                      uint32_t event_type);
 int sso_xae_reconfigure(struct rte_eventdev *event_dev);
 void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
+
+int otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+                                const struct rte_eth_dev *eth_dev,
+                                uint32_t *caps);
+int otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
+                                 const struct rte_eth_dev *eth_dev,
+                                 int32_t rx_queue_id,
+               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
+int otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+                                 const struct rte_eth_dev *eth_dev,
+                                 int32_t rx_queue_id);
+
 /* Clean up API's */
 typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
 void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
index 810722f..ce5621f 100644
@@ -4,6 +4,197 @@
 
 #include "otx2_evdev.h"
 
+int
+otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
+                            const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+       int rc;
+
+       RTE_SET_USED(event_dev);
+       rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+       if (rc)
+               *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+       else
+               *caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+       return 0;
+}
+
+static inline int
+sso_rxq_enable(struct otx2_eth_dev *dev, uint16_t qid, uint8_t tt, uint8_t ggrp,
+              uint16_t eth_port_id)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *aq;
+       int rc;
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = qid;
+       aq->ctype = NIX_AQ_CTYPE_CQ;
+       aq->op = NIX_AQ_INSTOP_WRITE;
+
+       aq->cq.ena = 0;
+       aq->cq.caching = 0;
+
+       otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
+       aq->cq_mask.ena = ~(aq->cq_mask.ena);
+       aq->cq_mask.caching = ~(aq->cq_mask.caching);
+
+       rc = otx2_mbox_process(mbox);
+       if (rc < 0) {
+               otx2_err("Failed to disable cq context");
+               goto fail;
+       }
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = qid;
+       aq->ctype = NIX_AQ_CTYPE_RQ;
+       aq->op = NIX_AQ_INSTOP_WRITE;
+
+       aq->rq.sso_ena = 1;
+       aq->rq.sso_tt = tt;
+       aq->rq.sso_grp = ggrp;
+       aq->rq.ena_wqwd = 1;
+       /* Mbuf header generation:
+        * > FIRST_SKIP is a superset of WQE_SKIP; don't modify FIRST_SKIP as
+        * it already carries the mbuf size, headroom and private area info.
+        * > Using WQE_SKIP we can directly assign
+        *              mbuf = wqe - sizeof(struct mbuf);
+        * so that the mbuf header will not hold unpredictable values, while
+        * headroom and private data start at the beginning of wqe_data.
+        */
+       aq->rq.wqe_skip = 1;
+       aq->rq.wqe_caching = 1;
+       aq->rq.spb_ena = 0;
+       aq->rq.flow_tagw = 20; /* 20-bits */
+
+       /* Flow tag calculation:
+        *
+        * rq_tag<31:24> = good/bad_utag;
+        * rq_tag<23:0>  = ltag;
+        *
+        * flow_tag_mask<31:0> = (1 << flow_tagw) - 1; i.e. tag bits <19:0>
+        * tag<31:0> = (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
+        *
+        * Setup:
+        * ltag<23:0> = (eth_port_id & 0xF) << 20;
+        * good/bad_utag =
+        *      ((eth_port_id >> 4) & 0xF) | (RTE_EVENT_TYPE_ETHDEV << 4);
+        *
+        * TAG<31:0> on getwork = <31:28>(RTE_EVENT_TYPE_ETHDEV) |
+        *                        <27:20> (eth_port_id) | <19:0> [TAG]
+        */
+
+       aq->rq.ltag = (eth_port_id & 0xF) << 20;
+       aq->rq.good_utag = ((eth_port_id >> 4) & 0xF) |
+                               (RTE_EVENT_TYPE_ETHDEV << 4);
+       aq->rq.bad_utag = aq->rq.good_utag;
+
+       aq->rq.ena = 0;          /* Don't enable RQ yet */
+       aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+       aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+
+       otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
+       /* mask the bits to write. */
+       aq->rq_mask.sso_ena      = ~(aq->rq_mask.sso_ena);
+       aq->rq_mask.sso_tt       = ~(aq->rq_mask.sso_tt);
+       aq->rq_mask.sso_grp      = ~(aq->rq_mask.sso_grp);
+       aq->rq_mask.ena_wqwd     = ~(aq->rq_mask.ena_wqwd);
+       aq->rq_mask.wqe_skip     = ~(aq->rq_mask.wqe_skip);
+       aq->rq_mask.wqe_caching  = ~(aq->rq_mask.wqe_caching);
+       aq->rq_mask.spb_ena      = ~(aq->rq_mask.spb_ena);
+       aq->rq_mask.flow_tagw    = ~(aq->rq_mask.flow_tagw);
+       aq->rq_mask.ltag         = ~(aq->rq_mask.ltag);
+       aq->rq_mask.good_utag    = ~(aq->rq_mask.good_utag);
+       aq->rq_mask.bad_utag     = ~(aq->rq_mask.bad_utag);
+       aq->rq_mask.ena          = ~(aq->rq_mask.ena);
+       aq->rq_mask.pb_caching   = ~(aq->rq_mask.pb_caching);
+       aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
+
+       rc = otx2_mbox_process(mbox);
+       if (rc < 0) {
+               otx2_err("Failed to init rx adapter context");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       return rc;
+}
+
+static inline int
+sso_rxq_disable(struct otx2_eth_dev *dev, uint16_t qid)
+{
+       struct otx2_mbox *mbox = dev->mbox;
+       struct nix_aq_enq_req *aq;
+       int rc;
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = qid;
+       aq->ctype = NIX_AQ_CTYPE_CQ;
+       aq->op = NIX_AQ_INSTOP_INIT;
+
+       aq->cq.ena = 1;
+       aq->cq.caching = 1;
+
+       otx2_mbox_memset(&aq->cq_mask, 0, sizeof(struct nix_cq_ctx_s));
+       aq->cq_mask.ena = ~(aq->cq_mask.ena);
+       aq->cq_mask.caching = ~(aq->cq_mask.caching);
+
+       rc = otx2_mbox_process(mbox);
+       if (rc < 0) {
+               otx2_err("Failed to init cq context");
+               goto fail;
+       }
+
+       aq = otx2_mbox_alloc_msg_nix_aq_enq(mbox);
+       aq->qidx = qid;
+       aq->ctype = NIX_AQ_CTYPE_RQ;
+       aq->op = NIX_AQ_INSTOP_WRITE;
+
+       aq->rq.sso_ena = 0;
+       aq->rq.sso_tt = SSO_TT_UNTAGGED;
+       aq->rq.sso_grp = 0;
+       aq->rq.ena_wqwd = 0;
+       aq->rq.wqe_caching = 0;
+       aq->rq.wqe_skip = 0;
+       aq->rq.spb_ena = 0;
+       aq->rq.flow_tagw = 0x20;
+       aq->rq.ltag = 0;
+       aq->rq.good_utag = 0;
+       aq->rq.bad_utag = 0;
+       aq->rq.ena = 1;
+       aq->rq.pb_caching = 0x2; /* First cache aligned block to LLC */
+       aq->rq.xqe_imm_size = 0; /* No pkt data copy to CQE */
+
+       otx2_mbox_memset(&aq->rq_mask, 0, sizeof(struct nix_rq_ctx_s));
+       /* mask the bits to write. */
+       aq->rq_mask.sso_ena      = ~(aq->rq_mask.sso_ena);
+       aq->rq_mask.sso_tt       = ~(aq->rq_mask.sso_tt);
+       aq->rq_mask.sso_grp      = ~(aq->rq_mask.sso_grp);
+       aq->rq_mask.ena_wqwd     = ~(aq->rq_mask.ena_wqwd);
+       aq->rq_mask.wqe_caching  = ~(aq->rq_mask.wqe_caching);
+       aq->rq_mask.wqe_skip     = ~(aq->rq_mask.wqe_skip);
+       aq->rq_mask.spb_ena      = ~(aq->rq_mask.spb_ena);
+       aq->rq_mask.flow_tagw    = ~(aq->rq_mask.flow_tagw);
+       aq->rq_mask.ltag         = ~(aq->rq_mask.ltag);
+       aq->rq_mask.good_utag    = ~(aq->rq_mask.good_utag);
+       aq->rq_mask.bad_utag     = ~(aq->rq_mask.bad_utag);
+       aq->rq_mask.ena          = ~(aq->rq_mask.ena);
+       aq->rq_mask.pb_caching   = ~(aq->rq_mask.pb_caching);
+       aq->rq_mask.xqe_imm_size = ~(aq->rq_mask.xqe_imm_size);
+
+       rc = otx2_mbox_process(mbox);
+       if (rc < 0) {
+               otx2_err("Failed to clear rx adapter context");
+               goto fail;
+       }
+
+       return 0;
+fail:
+       return rc;
+}
+
 void
 sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
 {
@@ -17,3 +208,66 @@ sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
                break;
        }
 }
+
+int
+otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t rx_queue_id,
+               const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+       struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
+       uint16_t port = eth_dev->data->port_id;
+       int i, rc;
+
+       RTE_SET_USED(event_dev);
+       rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+       if (rc)
+               return -EINVAL;
+
+       if (rx_queue_id < 0) {
+               for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++) {
+                       rc |= sso_rxq_enable(otx2_eth_dev, i,
+                                            queue_conf->ev.sched_type,
+                                            queue_conf->ev.queue_id, port);
+               }
+       } else {
+               rc |= sso_rxq_enable(otx2_eth_dev, (uint16_t)rx_queue_id,
+                                    queue_conf->ev.sched_type,
+                                    queue_conf->ev.queue_id, port);
+       }
+
+       if (rc < 0) {
+               otx2_err("Failed to configure Rx adapter port=%d, q=%d", port,
+                        queue_conf->ev.queue_id);
+               return rc;
+       }
+
+       return 0;
+}
+
+int
+otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
+                             const struct rte_eth_dev *eth_dev,
+                             int32_t rx_queue_id)
+{
+       struct otx2_eth_dev *dev = eth_dev->data->dev_private;
+       int i, rc;
+
+       RTE_SET_USED(event_dev);
+       rc = strncmp(eth_dev->device->driver->name, "net_octeontx2", 13);
+       if (rc)
+               return -EINVAL;
+
+       if (rx_queue_id < 0) {
+               for (i = 0 ; i < eth_dev->data->nb_rx_queues; i++)
+                       rc = sso_rxq_disable(dev, i);
+       } else {
+               rc = sso_rxq_disable(dev, (uint16_t)rx_queue_id);
+       }
+
+       if (rc < 0)
+               otx2_err("Failed to clear Rx adapter config port=%d, q=%d",
+                        eth_dev->data->port_id, rx_queue_id);
+
+       return rc;
+}
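
As a worked illustration of the tag layout documented in sso_rxq_enable() above, here is a standalone sketch (not part of the patch) that reproduces the composition for a hypothetical ethdev port id and flow tag; RTE_EVENT_TYPE_ETHDEV comes from rte_eventdev.h.

#include <stdint.h>
#include <rte_eventdev.h>

/* Recompute the 32-bit tag delivered on getwork:
 * <31:28> event type, <27:20> ethdev port id, <19:0> flow tag.
 */
static uint32_t
example_rx_tag(uint16_t eth_port_id, uint32_t flow_tag)
{
	uint32_t ltag = (uint32_t)(eth_port_id & 0xF) << 20;
	uint32_t utag = ((eth_port_id >> 4) & 0xF) |
			(RTE_EVENT_TYPE_ETHDEV << 4);
	uint32_t rq_tag = (utag << 24) | ltag;
	uint32_t flow_tag_mask = (1u << 20) - 1; /* flow_tagw = 20 */

	/* e.g. eth_port_id = 0x25, flow_tag = 0x12345 gives 0x22512345:
	 * type 0x2 (ETHDEV), port 0x25, flow tag 0x12345.
	 */
	return (~flow_tag_mask & rq_tag) | (flow_tag_mask & flow_tag);
}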