ssows_swtag_untag(ws);
}
-#define R(name, f1, f0, flags) \
+#define R(name, f2, f1, f0, flags) \
static uint16_t __rte_noinline __rte_hot \
ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks) \
{ \
ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
(ev.event >> 20) & 0x7F,
OCCTX_RX_OFFLOAD_NONE |
- OCCTX_RX_MULTI_SEG_F);
+ OCCTX_RX_MULTI_SEG_F,
+ ws->lookup_mem);
else
ev.u64 = get_work1;
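+ /*
+  * !! collapses each Tx offload flag to 0 or 1, so the flag set
+  * directly indexes the [NOFF][OL3_OL4_CSUM][L3_L4_CSUM][MULTI_SEG]
+  * table of precompiled enqueue variants.
+  */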
dev->txa_enqueue = ssow_txa_enqueue
[!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
- [0]
- [0]
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
+ [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
[!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];
dev->txa_enqueue_same_dest = dev->txa_enqueue;
/* Assigning dequeue func pointers */
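+ /*
+  * R() expands once per flag combination, so every [f2][f1][f0] slot
+  * of the 2x2x2 table holds a dequeue variant specialized at compile
+  * time for [VLAN_FLTR][CSUM][MULTI_SEG]; no per-packet flag checks
+  * remain on the fast path.
+  */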
- const event_dequeue_t ssow_deq[2][2] = {
-#define R(name, f1, f0, flags) \
- [f1][f0] = ssows_deq_ ##name,
+ const event_dequeue_t ssow_deq[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
};
dev->dequeue = ssow_deq
[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
- const event_dequeue_burst_t ssow_deq_burst[2][2] = {
-#define R(name, f1, f0, flags) \
- [f1][f0] = ssows_deq_burst_ ##name,
+ const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
};
dev->dequeue_burst = ssow_deq_burst
[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
if (edev->is_timeout_deq) {
- const event_dequeue_t ssow_deq_timeout[2][2] = {
-#define R(name, f1, f0, flags) \
- [f1][f0] = ssows_deq_timeout_ ##name,
+ const event_dequeue_t ssow_deq_timeout[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_timeout_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
};
dev->dequeue = ssow_deq_timeout
[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
- const event_dequeue_burst_t ssow_deq_timeout_burst[2][2] = {
-#define R(name, f1, f0, flags) \
- [f1][f0] = ssows_deq_timeout_burst_ ##name,
+ const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
+#define R(name, f2, f1, f0, flags) \
+ [f2][f1][f0] = ssows_deq_timeout_burst_ ##name,
SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
};
dev->dequeue_burst = ssow_deq_timeout_burst
[!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
+ [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
[!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
}
}
+
+static void
+octeontx_create_rx_ol_flags_array(void *mem)
+{
+ uint16_t idx, errcode, errlev;
+ uint32_t val, *ol_flags;
+
+ /* The ol_flags array occupies the lookup memory from its base */
+ ol_flags = (uint32_t *)mem;
+
+ for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
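+ /* Each table index mirrors WQE word 2: errlev in bits [10:8],
+  * errcode in bits [7:0].
+  */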
+ errcode = idx & 0xff;
+ errlev = (idx & 0x700) >> 8;
+
+ val = PKT_RX_IP_CKSUM_UNKNOWN;
+ val |= PKT_RX_L4_CKSUM_UNKNOWN;
+ val |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+
+ switch (errlev) {
+ case OCCTX_ERRLEV_RE:
+ if (errcode) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_L4_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ }
+ break;
+ case OCCTX_ERRLEV_LC:
+ if (errcode == OCCTX_EC_IP4_CSUM) {
+ val |= PKT_RX_IP_CKSUM_BAD;
+ val |= PKT_RX_EIP_CKSUM_BAD;
+ } else {
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ }
+ break;
+ case OCCTX_ERRLEV_LD:
+ /* Check if parsed packet is neither IPv4 nor IPv6 */
+ if (errcode == OCCTX_EC_IP4_NOT)
+ break;
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ if (errcode == OCCTX_EC_L4_CSUM)
+ val |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ else
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ case OCCTX_ERRLEV_LE:
+ if (errcode == OCCTX_EC_IP4_CSUM)
+ val |= PKT_RX_IP_CKSUM_BAD;
+ else
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ break;
+ case OCCTX_ERRLEV_LF:
+ /* Check if parsed packet is neither IPv4 nor IPv6 */
+ if (errcode == OCCTX_EC_IP4_NOT)
+ break;
+ val |= PKT_RX_IP_CKSUM_GOOD;
+ if (errcode == OCCTX_EC_L4_CSUM)
+ val |= PKT_RX_L4_CKSUM_BAD;
+ else
+ val |= PKT_RX_L4_CKSUM_GOOD;
+ break;
+ }
+
+ ol_flags[idx] = val;
+ }
+}
+
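+/*
+ * Get-or-create the lookup memzone: when the zone already exists
+ * (another port or a secondary process reserved it first), reuse its
+ * address instead of rebuilding the table.
+ */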
+void *
+octeontx_fastpath_lookup_mem_get(void)
+{
+ const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
+ const struct rte_memzone *mz;
+ void *mem;
+
+ mz = rte_memzone_lookup(name);
+ if (mz != NULL)
+ return mz->addr;
+
+ /* First request: reserve and populate the lookup memzone */
+ mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
+ SOCKET_ID_ANY, 0, OCCTX_ALIGN);
+ if (mz != NULL) {
+ mem = mz->addr;
+ /* Form the rx ol_flags based on errcode */
+ octeontx_create_rx_ol_flags_array(mem);
+ return mem;
+ }
+ return NULL;
+}
#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"
+/* Lookup memzone alignment: OCTEON TX cache-line size */
+#define OCCTX_ALIGN 128
+
+/* Fastpath lookup memzone name */
+#define OCCTX_FASTPATH_LOOKUP_MEM "octeontx_fastpath_lookup_mem"
+
+/* WQE's ERRCODE + ERRLEV (11 bits) */
+#define ERRCODE_ERRLEN_WIDTH 11
+#define ERR_ARRAY_SZ ((BIT(ERRCODE_ERRLEN_WIDTH)) *\
+ sizeof(uint32_t))
+
+#define LOOKUP_ARRAY_SZ (ERR_ARRAY_SZ)
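+
+/* LOOKUP_ARRAY_SZ: 2^11 entries x 4 bytes = 8 KiB of ol_flags */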
+
+#define OCCTX_EC_IP4_NOT 0x41
+#define OCCTX_EC_IP4_CSUM 0x42
+#define OCCTX_EC_L4_CSUM 0x62
+
+enum OCCTX_ERRLEV_E {
+ OCCTX_ERRLEV_RE = 0,
+ OCCTX_ERRLEV_LA = 1,
+ OCCTX_ERRLEV_LB = 2,
+ OCCTX_ERRLEV_LC = 3,
+ OCCTX_ERRLEV_LD = 4,
+ OCCTX_ERRLEV_LE = 5,
+ OCCTX_ERRLEV_LF = 6,
+ OCCTX_ERRLEV_LG = 7,
+};
+
enum {
SSO_SYNC_ORDERED,
SSO_SYNC_ATOMIC,
/* SSO Operations */
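+/*
+ * One table load converts the 11-bit ERRLEV+ERRCODE field of WQE
+ * word 2 into fully formed mbuf ol_flags.
+ */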
+static __rte_always_inline uint32_t
+ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
+{
+ const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;
+
+ return ol_flags[(in & 0x7ff)];
+}
+
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
struct rte_mbuf *mbuf)
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
- const uint16_t flag)
+ const uint16_t flag, const void *lookup_mem)
{
struct rte_mbuf *mbuf;
octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
mbuf->ol_flags = 0;
mbuf->pkt_len = wqe->s.w1.len;
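+ /*
+  * With CSUM enabled, ol_flags come straight from the table built by
+  * octeontx_create_rx_ol_flags_array(), indexed by WQE word 2.
+  */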
+ if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
+ mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
+ wqe->w[2]);
+
if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
mbuf->nb_segs = wqe->s.w0.bufs;
mbuf->data_len = wqe->s.w5.size;
if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
- (ev->event >> 20) & 0x7F, flag);
+ (ev->event >> 20) & 0x7F, flag, ws->lookup_mem);
} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
ssovf_octeontx_wqe_free(get_work1);
return 0;
/* RX offload macros */
#define VLAN_FLTR_F OCCTX_RX_VLAN_FLTR_F
+#define CSUM_F OCCTX_RX_OFFLOAD_CSUM_F
#define MULT_RX_F OCCTX_RX_MULTI_SEG_F
-/* [VLAN_FLTR][MULTI_SEG] */
+
+/* [VLAN_FLTR] [CSUM_F] [MULTI_SEG] */
#define OCCTX_RX_FASTPATH_MODES \
-R(no_offload, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
-R(mseg, 0, 1, MULT_RX_F) \
-R(vlan, 1, 0, VLAN_FLTR_F) \
-R(vlan_mseg, 1, 1, VLAN_FLTR_F | MULT_RX_F)
+R(no_offload, 0, 0, 0, OCCTX_RX_OFFLOAD_NONE) \
+R(mseg, 0, 0, 1, MULT_RX_F) \
+R(csum, 0, 1, 0, CSUM_F) \
+R(csum_mseg, 0, 1, 1, CSUM_F | MULT_RX_F) \
+R(vlan, 1, 0, 0, VLAN_FLTR_F) \
+R(vlan_mseg, 1, 0, 1, VLAN_FLTR_F | MULT_RX_F) \
+R(vlan_csum, 1, 1, 0, VLAN_FLTR_F | CSUM_F) \
+R(vlan_csum_mseg, 1, 1, 1, CSUM_F | VLAN_FLTR_F | \
+ MULT_RX_F)
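+
+/* Columns: f2 = VLAN filter, f1 = checksum, f0 = multi-seg */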
#endif /* __OCTEONTX_RXTX_H__ */